diff --git a/.github/lockdown.yml b/.github/lockdown.yml
deleted file mode 100644
index d3546bd2bc..0000000000
--- a/.github/lockdown.yml
+++ /dev/null
@@ -1,34 +0,0 @@
-# Configuration for Repo Lockdown - https://github.com/dessant/repo-lockdown
-
-# Close issues and pull requests
-close: true
-
-# Lock issues and pull requests
-lock: true
-
-issues:
-  comment: |
-    Thank you for your interest in the QEMU project.
-
-    This repository is a read-only mirror of the project's repostories hosted
-    at https://gitlab.com/qemu-project/qemu.git.
-    The project does not process issues filed on GitHub.
-
-    The project issues are tracked on GitLab:
-    https://gitlab.com/qemu-project/qemu/-/issues
-
-    QEMU welcomes bug report contributions. You can file new ones on:
-    https://gitlab.com/qemu-project/qemu/-/issues/new
-
-pulls:
-  comment: |
-    Thank you for your interest in the QEMU project.
-
-    This repository is a read-only mirror of the project's repostories hosted
-    on https://gitlab.com/qemu-project/qemu.git.
-    The project does not process merge requests filed on GitHub.
-
-    QEMU welcomes contributions of code (either fixing bugs or adding new
-    functionality). However, we get a lot of patches, and so we have some
-    guidelines about contributing on the project website:
-    https://www.qemu.org/contribute/
diff --git a/.github/workflows/lockdown.yml b/.github/workflows/lockdown.yml
new file mode 100644
index 0000000000..ad8b8f7e30
--- /dev/null
+++ b/.github/workflows/lockdown.yml
@@ -0,0 +1,30 @@
+# Configuration for Repo Lockdown - https://github.com/dessant/repo-lockdown
+
+name: 'Repo Lockdown'
+
+on:
+  pull_request_target:
+    types: opened
+
+permissions:
+  pull-requests: write
+
+jobs:
+  action:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: dessant/repo-lockdown@v2
+        with:
+          pull-comment: |
+            Thank you for your interest in the QEMU project.
+
+            This repository is a read-only mirror of the project's repostories hosted
+            on https://gitlab.com/qemu-project/qemu.git.
+            The project does not process merge requests filed on GitHub.
+
+            QEMU welcomes contributions of code (either fixing bugs or adding new
+            functionality).
However, we get a lot of patches, and so we have some + guidelines about contributing on the project website: + https://www.qemu.org/contribute/ + lock-pull: true + close-pull: true diff --git a/.gitlab-ci.d/buildtest-template.yml b/.gitlab-ci.d/buildtest-template.yml index fcbcc4e627..2c7980a4f6 100644 --- a/.gitlab-ci.d/buildtest-template.yml +++ b/.gitlab-ci.d/buildtest-template.yml @@ -37,7 +37,7 @@ # Avoid recompiling by hiding ninja with NINJA=":" - make NINJA=":" $MAKE_CHECK_ARGS -.acceptance_test_job_template: +.avocado_test_job_template: extends: .native_test_job_template cache: key: "${CI_JOB_NAME}-cache" diff --git a/.gitlab-ci.d/buildtest.yml b/.gitlab-ci.d/buildtest.yml index 5c378e35f9..71d0f407ad 100644 --- a/.gitlab-ci.d/buildtest.yml +++ b/.gitlab-ci.d/buildtest.yml @@ -26,14 +26,14 @@ check-system-alpine: IMAGE: alpine MAKE_CHECK_ARGS: check -acceptance-system-alpine: - extends: .acceptance_test_job_template +avocado-system-alpine: + extends: .avocado_test_job_template needs: - job: build-system-alpine artifacts: true variables: IMAGE: alpine - MAKE_CHECK_ARGS: check-acceptance + MAKE_CHECK_ARGS: check-avocado build-system-ubuntu: extends: .native_build_job_template @@ -59,14 +59,14 @@ check-system-ubuntu: IMAGE: ubuntu2004 MAKE_CHECK_ARGS: check -acceptance-system-ubuntu: - extends: .acceptance_test_job_template +avocado-system-ubuntu: + extends: .avocado_test_job_template needs: - job: build-system-ubuntu artifacts: true variables: IMAGE: ubuntu2004 - MAKE_CHECK_ARGS: check-acceptance + MAKE_CHECK_ARGS: check-avocado build-system-debian: extends: .native_build_job_template @@ -91,14 +91,14 @@ check-system-debian: IMAGE: debian-amd64 MAKE_CHECK_ARGS: check -acceptance-system-debian: - extends: .acceptance_test_job_template +avocado-system-debian: + extends: .avocado_test_job_template needs: - job: build-system-debian artifacts: true variables: IMAGE: debian-amd64 - MAKE_CHECK_ARGS: check-acceptance + MAKE_CHECK_ARGS: check-avocado build-system-fedora: extends: .native_build_job_template @@ -125,14 +125,14 @@ check-system-fedora: IMAGE: fedora MAKE_CHECK_ARGS: check -acceptance-system-fedora: - extends: .acceptance_test_job_template +avocado-system-fedora: + extends: .avocado_test_job_template needs: - job: build-system-fedora artifacts: true variables: IMAGE: fedora - MAKE_CHECK_ARGS: check-acceptance + MAKE_CHECK_ARGS: check-avocado build-system-centos: extends: .native_build_job_template @@ -159,14 +159,14 @@ check-system-centos: IMAGE: centos8 MAKE_CHECK_ARGS: check -acceptance-system-centos: - extends: .acceptance_test_job_template +avocado-system-centos: + extends: .avocado_test_job_template needs: - job: build-system-centos artifacts: true variables: IMAGE: centos8 - MAKE_CHECK_ARGS: check-acceptance + MAKE_CHECK_ARGS: check-avocado build-system-opensuse: extends: .native_build_job_template @@ -191,14 +191,14 @@ check-system-opensuse: IMAGE: opensuse-leap MAKE_CHECK_ARGS: check -acceptance-system-opensuse: - extends: .acceptance_test_job_template +avocado-system-opensuse: + extends: .avocado_test_job_template needs: - job: build-system-opensuse artifacts: true variables: IMAGE: opensuse-leap - MAKE_CHECK_ARGS: check-acceptance + MAKE_CHECK_ARGS: check-avocado # This jobs explicitly disable TCG (--disable-tcg), KVM is detected by @@ -317,7 +317,7 @@ clang-user: # This can be accomplished by using -enable-slirp=git, which avoids the use of # a system-wide version of the library # -# Split in three sets of build/check/acceptance to limit the execution time of each +# 
Split in three sets of build/check/avocado to limit the execution time of each # job build-cfi-aarch64: extends: .native_build_job_template @@ -352,14 +352,14 @@ check-cfi-aarch64: IMAGE: fedora MAKE_CHECK_ARGS: check -acceptance-cfi-aarch64: - extends: .acceptance_test_job_template +avocado-cfi-aarch64: + extends: .avocado_test_job_template needs: - job: build-cfi-aarch64 artifacts: true variables: IMAGE: fedora - MAKE_CHECK_ARGS: check-acceptance + MAKE_CHECK_ARGS: check-avocado build-cfi-ppc64-s390x: extends: .native_build_job_template @@ -394,14 +394,14 @@ check-cfi-ppc64-s390x: IMAGE: fedora MAKE_CHECK_ARGS: check -acceptance-cfi-ppc64-s390x: - extends: .acceptance_test_job_template +avocado-cfi-ppc64-s390x: + extends: .avocado_test_job_template needs: - job: build-cfi-ppc64-s390x artifacts: true variables: IMAGE: fedora - MAKE_CHECK_ARGS: check-acceptance + MAKE_CHECK_ARGS: check-avocado build-cfi-x86_64: extends: .native_build_job_template @@ -430,14 +430,14 @@ check-cfi-x86_64: IMAGE: fedora MAKE_CHECK_ARGS: check -acceptance-cfi-x86_64: - extends: .acceptance_test_job_template +avocado-cfi-x86_64: + extends: .avocado_test_job_template needs: - job: build-cfi-x86_64 artifacts: true variables: IMAGE: fedora - MAKE_CHECK_ARGS: check-acceptance + MAKE_CHECK_ARGS: check-avocado tsan-build: extends: .native_build_job_template @@ -575,7 +575,6 @@ build-without-default-features: CONFIGURE_ARGS: --without-default-features --disable-capstone - --disable-fdt --disable-pie --disable-qom-cast-debug --disable-slirp diff --git a/.gitlab-ci.d/cirrus.yml b/.gitlab-ci.d/cirrus.yml index 675db69622..e7b25e7427 100644 --- a/.gitlab-ci.d/cirrus.yml +++ b/.gitlab-ci.d/cirrus.yml @@ -35,7 +35,7 @@ -e "s|[@]PIP3@|$PIP3|g" -e "s|[@]PYPI_PKGS@|$PYPI_PKGS|g" -e "s|[@]CONFIGURE_ARGS@|$CONFIGURE_ARGS|g" - -e "s|[@]TEST_TARGETSS@|$TEST_TARGETSS|g" + -e "s|[@]TEST_TARGETS@|$TEST_TARGETS|g" <.gitlab-ci.d/cirrus/build.yml >.gitlab-ci.d/cirrus/$NAME.yml - cat .gitlab-ci.d/cirrus/$NAME.yml - cirrus-run -v --show-build-log always .gitlab-ci.d/cirrus/$NAME.yml diff --git a/.gitlab-ci.d/cirrus/build.yml b/.gitlab-ci.d/cirrus/build.yml index 857bdc5536..c555f5d36e 100644 --- a/.gitlab-ci.d/cirrus/build.yml +++ b/.gitlab-ci.d/cirrus/build.yml @@ -13,6 +13,7 @@ env: PYTHON: "@PYTHON@" MAKE: "@MAKE@" CONFIGURE_ARGS: "@CONFIGURE_ARGS@" + TEST_TARGETS: "@TEST_TARGETS@" build_task: install_script: diff --git a/.gitlab-ci.d/container-cross.yml b/.gitlab-ci.d/container-cross.yml index 0fcebe363a..a3b5b90552 100644 --- a/.gitlab-ci.d/container-cross.yml +++ b/.gitlab-ci.d/container-cross.yml @@ -134,7 +134,8 @@ ppc64el-debian-cross-container: riscv64-debian-cross-container: extends: .container_job_template stage: containers-layer2 - needs: ['amd64-debian10-container'] + # as we are currently based on 'sid/unstable' we may break so... + allow_failure: true variables: NAME: debian-riscv64-cross diff --git a/.gitlab-ci.d/crossbuilds.yml b/.gitlab-ci.d/crossbuilds.yml index f10168db2e..17d6cb3e45 100644 --- a/.gitlab-ci.d/crossbuilds.yml +++ b/.gitlab-ci.d/crossbuilds.yml @@ -124,6 +124,25 @@ cross-ppc64el-user: variables: IMAGE: debian-ppc64el-cross +# The riscv64 cross-builds currently use a 'sid' container to get +# compilers and libraries. Until something more stable is found we +# allow_failure so as not to block CI. 
+cross-riscv64-system: + extends: .cross_system_build_job + allow_failure: true + needs: + job: riscv64-debian-cross-container + variables: + IMAGE: debian-riscv64-cross + +cross-riscv64-user: + extends: .cross_user_build_job + allow_failure: true + needs: + job: riscv64-debian-cross-container + variables: + IMAGE: debian-riscv64-cross + cross-s390x-system: extends: .cross_system_build_job needs: diff --git a/.gitlab-ci.d/edk2.yml b/.gitlab-ci.d/edk2.yml index 62497ba47f..13d0f8b019 100644 --- a/.gitlab-ci.d/edk2.yml +++ b/.gitlab-ci.d/edk2.yml @@ -50,7 +50,11 @@ build-edk2: GIT_DEPTH: 3 script: # Clone the required submodules and build EDK2 - git submodule update --init roms/edk2 - - git -C roms/edk2 submodule update --init + - git -C roms/edk2 submodule update --init -- + ArmPkg/Library/ArmSoftFloatLib/berkeley-softfloat-3 + BaseTools/Source/C/BrotliCompress/brotli + CryptoPkg/Library/OpensslLib/openssl + MdeModulePkg/Library/BrotliCustomDecompressLib/brotli - export JOBS=$(($(getconf _NPROCESSORS_ONLN) + 1)) - echo "=== Using ${JOBS} simultaneous jobs ===" - make -j${JOBS} -C roms efi 2>&1 1>edk2-stdout.log | tee -a edk2-stderr.log >&2 diff --git a/.gitlab-ci.d/static_checks.yml b/.gitlab-ci.d/static_checks.yml index 96dbd9e310..902843f8b3 100644 --- a/.gitlab-ci.d/static_checks.yml +++ b/.gitlab-ci.d/static_checks.yml @@ -8,7 +8,7 @@ check-patch: variables: GIT_DEPTH: 1000 rules: - - if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH' + - if: '$CI_PROJECT_NAMESPACE == "qemu-project"' when: never - when: on_success allow_failure: true diff --git a/.mailmap b/.mailmap index f029d1c21f..8beb2f95ae 100644 --- a/.mailmap +++ b/.mailmap @@ -69,6 +69,7 @@ Yongbok Kim # git author config, or had utf8/latin1 encoding issues. Aaron Lindsay Alexey Gerasimenko +Alex Chen Alex Ivanov Andreas Färber Bandan Das @@ -99,9 +100,11 @@ Gautham R. Shenoy Gautham R. Shenoy Gonglei (Arei) Guang Wang +Haibin Zhang Hailiang Zhang Hanna Reitz Hervé Poussineau +Hyman Huang Jakub Jermář Jakub Jermář Jean-Christophe Dubois @@ -135,6 +138,7 @@ Nicholas Thomas Nikunj A Dadhania Orit Wasserman Paolo Bonzini +Pan Nengyuan Pavel Dovgaluk Pavel Dovgaluk Pavel Dovgaluk diff --git a/.travis.yml b/.travis.yml index 0faddf7b4e..41010ebe6b 100644 --- a/.travis.yml +++ b/.travis.yml @@ -305,26 +305,3 @@ jobs: - CONFIG="--disable-containers --disable-tcg --enable-kvm --disable-tools --host-cc=clang --cxx=clang++" - UNRELIABLE=true - - # Release builds - # The make-release script expect a QEMU version, so our tag must start with a 'v'. - # This is the case when release candidate tags are created. - - name: "Release tarball" - if: tag IS present AND tag =~ /^v\d+\.\d+(\.\d+)?(-\S*)?$/ - env: - # We want to build from the release tarball - - BUILD_DIR="release/build/dir" SRC_DIR="../../.." 
- - BASE_CONFIG="--prefix=$PWD/dist" - - CONFIG="--target-list=x86_64-softmmu,aarch64-softmmu,armeb-linux-user,ppc-linux-user" - - TEST_CMD="make install -j${JOBS}" - - QEMU_VERSION="${TRAVIS_TAG:1}" - - CACHE_NAME="${TRAVIS_BRANCH}-linux-gcc-default" - script: - - make -C ${SRC_DIR} qemu-${QEMU_VERSION}.tar.bz2 - - ls -l ${SRC_DIR}/qemu-${QEMU_VERSION}.tar.bz2 - - tar -xf ${SRC_DIR}/qemu-${QEMU_VERSION}.tar.bz2 && cd qemu-${QEMU_VERSION} - - mkdir -p release-build && cd release-build - - ../configure ${BASE_CONFIG} ${CONFIG} || { cat config.log meson-logs/meson-log.txt && exit 1; } - - make install - allow_failures: - - env: UNRELIABLE=true diff --git a/Kconfig.host b/Kconfig.host index 24255ef441..60b9c07b5e 100644 --- a/Kconfig.host +++ b/Kconfig.host @@ -41,3 +41,7 @@ config PVRDMA config MULTIPROCESS_ALLOWED bool imply MULTIPROCESS + +config FUZZ + bool + select SPARSE_MEM diff --git a/MAINTAINERS b/MAINTAINERS index bf1fc5b21e..d3879aa3c1 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -109,6 +109,12 @@ K: ^Subject:.*(?i)s390x? T: git https://gitlab.com/cohuck/qemu.git s390-next L: qemu-s390x@nongnu.org +MIPS general architecture support +M: Philippe Mathieu-Daudé +R: Jiaxun Yang +S: Odd Fixes +K: ^Subject:.*(?i)mips + Guest CPU cores (TCG) --------------------- Overall TCG CPUs @@ -171,7 +177,7 @@ L: qemu-arm@nongnu.org S: Maintained F: hw/arm/smmu* F: include/hw/arm/smmu* -F: tests/acceptance/smmu.py +F: tests/avocado/smmu.py AVR TCG CPUs M: Michael Rolnik @@ -179,7 +185,7 @@ S: Maintained F: docs/system/target-avr.rst F: gdb-xml/avr-cpu.xml F: target/avr/ -F: tests/acceptance/machine_avr6.py +F: tests/avocado/machine_avr6.py CRIS TCG CPUs M: Edgar E. Iglesias @@ -205,10 +211,7 @@ HPPA (PA-RISC) TCG CPUs M: Richard Henderson S: Maintained F: target/hppa/ -F: hw/hppa/ F: disas/hppa.c -F: hw/net/*i82596* -F: include/hw/net/lasi_82596.h M68K TCG CPUs M: Laurent Vivier @@ -222,6 +225,8 @@ S: Maintained F: target/microblaze/ F: hw/microblaze/ F: disas/microblaze.c +F: tests/docker/dockerfiles/debian-microblaze-cross.d/build-toolchain.sh +F: tests/tcg/nios2/Makefile.target MIPS TCG CPUs M: Philippe Mathieu-Daudé @@ -230,19 +235,9 @@ R: Jiaxun Yang R: Aleksandar Rikalo S: Odd Fixes F: target/mips/ -F: configs/devices/mips*/* F: disas/mips.c F: docs/system/cpu-models-mips.rst.inc -F: hw/intc/mips_gic.c -F: hw/mips/ -F: hw/misc/mips_* -F: hw/timer/mips_gictimer.c -F: include/hw/intc/mips_gic.h -F: include/hw/mips/ -F: include/hw/misc/mips_* -F: include/hw/timer/mips_gictimer.h F: tests/tcg/mips/ -K: ^Subject:.*(?i)mips MIPS TCG CPUs (nanoMIPS ISA) S: Orphan @@ -257,6 +252,7 @@ F: target/nios2/ F: hw/nios2/ F: disas/nios2.c F: configs/devices/nios2-softmmu/default.mak +F: tests/docker/dockerfiles/debian-nios2-cross.d/build-toolchain.sh OpenRISC TCG CPUs M: Stafford Horne @@ -266,13 +262,16 @@ F: hw/openrisc/ F: tests/tcg/openrisc/ PowerPC TCG CPUs -M: David Gibson -M: Greg Kurz +M: Cédric Le Goater +M: Daniel Henrique Barboza +R: David Gibson +R: Greg Kurz L: qemu-ppc@nongnu.org S: Maintained F: target/ppc/ -F: hw/ppc/ -F: include/hw/ppc/ +F: hw/ppc/ppc.c +F: hw/ppc/ppc_booke.c +F: include/hw/ppc/ppc.h F: disas/ppc.c RISC-V TCG CPUs @@ -385,14 +384,15 @@ F: target/mips/kvm* F: target/mips/sysemu/ PPC KVM CPUs -M: David Gibson -M: Greg Kurz +M: Cédric Le Goater +M: Daniel Henrique Barboza +R: David Gibson +R: Greg Kurz S: Maintained F: target/ppc/kvm.c S390 KVM CPUs M: Halil Pasic -M: Cornelia Huck M: Christian Borntraeger S: Supported F: target/s390x/kvm/ @@ -407,7 +407,6 @@ F: 
hw/intc/s390_flic.c F: hw/intc/s390_flic_kvm.c F: include/hw/s390x/s390_flic.h F: gdb-xml/s390*.xml -T: git https://gitlab.com/cohuck/qemu.git s390-next T: git https://github.com/borntraeger/qemu.git s390-next L: qemu-s390x@nongnu.org @@ -416,7 +415,10 @@ M: Paolo Bonzini M: Marcelo Tosatti L: kvm@vger.kernel.org S: Supported +F: docs/amd-memory-encryption.txt +F: docs/system/i386/sgx.rst F: target/i386/kvm/ +F: target/i386/sev* F: scripts/kvm/vmxcap Guest CPU Cores (other accelerators) @@ -659,7 +661,7 @@ S: Odd Fixes F: include/hw/arm/digic.h F: hw/*/digic* F: include/hw/*/digic* -F: tests/acceptance/machine_arm_canona1100.py +F: tests/avocado/machine_arm_canona1100.py F: docs/system/arm/digic.rst Goldfish RTC @@ -710,7 +712,7 @@ S: Maintained F: hw/arm/integratorcp.c F: hw/misc/arm_integrator_debug.c F: include/hw/misc/arm_integrator_debug.h -F: tests/acceptance/machine_arm_integratorcp.py +F: tests/avocado/machine_arm_integratorcp.py F: docs/system/arm/integratorcp.rst MCIMX6UL EVK / i.MX6ul @@ -807,7 +809,7 @@ F: include/hw/display/blizzard.h F: include/hw/input/lm832x.h F: include/hw/input/tsc2xxx.h F: include/hw/misc/cbus.h -F: tests/acceptance/machine_arm_n8x0.py +F: tests/avocado/machine_arm_n8x0.py F: docs/system/arm/nseries.rst Palm @@ -1097,6 +1099,8 @@ R: Helge Deller S: Odd Fixes F: configs/devices/hppa-softmmu/default.mak F: hw/hppa/ +F: hw/net/*i82596* +F: include/hw/net/lasi_82596.h F: pc-bios/hppa-firmware.img M68K Machines @@ -1159,7 +1163,7 @@ M: Edgar E. Iglesias S: Maintained F: hw/microblaze/petalogix_s3adsp1800_mmu.c F: include/hw/char/xilinx_uartlite.h -F: tests/acceptance/machine_microblaze.py +F: tests/avocado/machine_microblaze.py petalogix_ml605 M: Edgar E. Iglesias @@ -1168,6 +1172,13 @@ F: hw/microblaze/petalogix_ml605_mmu.c MIPS Machines ------------- +Overall MIPS Machines +M: Philippe Mathieu-Daudé +S: Odd Fixes +F: configs/devices/mips*/* +F: hw/mips/ +F: include/hw/mips/ + Jazz M: Hervé Poussineau R: Aleksandar Rikalo @@ -1185,8 +1196,8 @@ F: hw/acpi/piix4.c F: hw/mips/malta.c F: hw/mips/gt64xxx_pci.c F: include/hw/southbridge/piix.h -F: tests/acceptance/linux_ssh_mips_malta.py -F: tests/acceptance/machine_mips_malta.py +F: tests/avocado/linux_ssh_mips_malta.py +F: tests/avocado/machine_mips_malta.py Mipssim R: Aleksandar Rikalo @@ -1204,7 +1215,7 @@ F: hw/isa/vt82c686.c F: hw/pci-host/bonito.c F: hw/usb/vt82c686-uhci-pci.c F: include/hw/isa/vt82c686.h -F: tests/acceptance/machine_mips_fuloong2e.py +F: tests/avocado/machine_mips_fuloong2e.py Loongson-3 virtual platforms M: Huacai Chen @@ -1214,7 +1225,7 @@ F: hw/intc/loongson_liointc.c F: hw/mips/loongson3_bootp.c F: hw/mips/loongson3_bootp.h F: hw/mips/loongson3_virt.c -F: tests/acceptance/machine_mips_loongson3v.py +F: tests/avocado/machine_mips_loongson3v.py Boston M: Paul Burton @@ -1235,24 +1246,19 @@ F: hw/openrisc/openrisc_sim.c PowerPC Machines ---------------- 405 -M: David Gibson -M: Greg Kurz L: qemu-ppc@nongnu.org -S: Odd Fixes +S: Orphan F: hw/ppc/ppc405_boards.c Bamboo -M: David Gibson -M: Greg Kurz L: qemu-ppc@nongnu.org -S: Odd Fixes +S: Orphan F: hw/ppc/ppc440_bamboo.c +F: tests/avocado/ppc_bamboo.py e500 -M: David Gibson -M: Greg Kurz L: qemu-ppc@nongnu.org -S: Odd Fixes +S: Orphan F: hw/ppc/e500* F: hw/gpio/mpc8xxx.c F: hw/i2c/mpc_i2c.c @@ -1261,20 +1267,18 @@ F: hw/pci-host/ppce500.c F: include/hw/ppc/ppc_e500.h F: include/hw/pci-host/ppce500.h F: pc-bios/u-boot.e500 +F: hw/intc/openpic_kvm.h +F: include/hw/ppc/openpic_kvm.h mpc8544ds -M: David Gibson -M: Greg Kurz L: 
qemu-ppc@nongnu.org -S: Odd Fixes +S: Orphan F: hw/ppc/mpc8544ds.c F: hw/ppc/mpc8544_guts.c -F: tests/acceptance/ppc_mpc8544ds.py +F: tests/avocado/ppc_mpc8544ds.py New World (mac99) M: Mark Cave-Ayland -R: David Gibson -R: Greg Kurz L: qemu-ppc@nongnu.org S: Odd Fixes F: hw/ppc/mac_newworld.c @@ -1293,8 +1297,6 @@ F: pc-bios/qemu_vga.ndrv Old World (g3beige) M: Mark Cave-Ayland -R: David Gibson -R: Greg Kurz L: qemu-ppc@nongnu.org S: Odd Fixes F: hw/ppc/mac_oldworld.c @@ -1308,8 +1310,6 @@ F: pc-bios/qemu_vga.ndrv PReP M: Hervé Poussineau -R: David Gibson -R: Greg Kurz L: qemu-ppc@nongnu.org S: Maintained F: hw/ppc/prep.c @@ -1322,13 +1322,15 @@ F: hw/dma/i82374.c F: hw/rtc/m48t59-isa.c F: include/hw/isa/pc87312.h F: include/hw/rtc/m48t59.h -F: tests/acceptance/ppc_prep_40p.py +F: tests/avocado/ppc_prep_40p.py sPAPR -M: David Gibson -M: Greg Kurz +M: Cédric Le Goater +M: Daniel Henrique Barboza +R: David Gibson +R: Greg Kurz L: qemu-ppc@nongnu.org -S: Supported +S: Maintained F: hw/*/spapr* F: include/hw/*/spapr* F: hw/*/xics* @@ -1340,12 +1342,10 @@ F: tests/qtest/spapr* F: tests/qtest/libqos/*spapr* F: tests/qtest/rtas* F: tests/qtest/libqos/rtas* -F: tests/acceptance/ppc_pseries.py +F: tests/avocado/ppc_pseries.py PowerNV (Non-Virtualized) M: Cédric Le Goater -M: David Gibson -M: Greg Kurz L: qemu-ppc@nongnu.org S: Maintained F: hw/ppc/pnv* @@ -1362,12 +1362,10 @@ M: Edgar E. Iglesias L: qemu-ppc@nongnu.org S: Odd Fixes F: hw/ppc/virtex_ml507.c -F: tests/acceptance/ppc_virtex_ml507.py +F: tests/avocado/ppc_virtex_ml507.py sam460ex M: BALATON Zoltan -R: David Gibson -R: Greg Kurz L: qemu-ppc@nongnu.org S: Maintained F: hw/ppc/sam460ex.c @@ -1381,7 +1379,6 @@ F: roms/u-boot-sam460ex pegasos2 M: BALATON Zoltan -R: David Gibson L: qemu-ppc@nongnu.org S: Maintained F: hw/ppc/pegasos2.c @@ -1391,6 +1388,8 @@ F: include/hw/pci-host/mv64361.h Virtual Open Firmware (VOF) M: Alexey Kardashevskiy +R: Cédric Le Goater +R: Daniel Henrique Barboza R: David Gibson R: Greg Kurz L: qemu-ppc@nongnu.org @@ -1452,7 +1451,7 @@ R: Yoshinori Sato S: Orphan F: docs/system/target-rx.rst F: hw/rx/rx-gdbsim.c -F: tests/acceptance/machine_rx_gdbsim.py +F: tests/avocado/machine_rx_gdbsim.py SH4 Machines ------------ @@ -1506,7 +1505,7 @@ F: include/hw/pci-host/sabre.h F: hw/pci-bridge/simba.c F: include/hw/pci-bridge/simba.h F: pc-bios/openbios-sparc64 -F: tests/acceptance/machine_sparc64_sun4u.py +F: tests/avocado/machine_sparc64_sun4u.py Sun4v M: Artyom Tarasenko @@ -1522,12 +1521,11 @@ S: Maintained F: hw/sparc/leon3.c F: hw/*/grlib* F: include/hw/*/grlib* -F: tests/acceptance/machine_sparc_leon3.py +F: tests/avocado/machine_sparc_leon3.py S390 Machines ------------- S390 Virtio-ccw -M: Cornelia Huck M: Halil Pasic M: Christian Borntraeger S: Supported @@ -1538,8 +1536,7 @@ F: include/hw/s390x/ F: hw/watchdog/wdt_diag288.c F: include/hw/watchdog/wdt_diag288.h F: configs/devices/s390x-softmmu/default.mak -F: tests/acceptance/machine_s390_ccw_virtio.py -T: git https://gitlab.com/cohuck/qemu.git s390-next +F: tests/avocado/machine_s390_ccw_virtio.py T: git https://github.com/borntraeger/qemu.git s390-next L: qemu-s390x@nongnu.org @@ -1625,7 +1622,7 @@ microvm M: Sergio Lopez M: Paolo Bonzini S: Maintained -F: docs/microvm.rst +F: docs/system/i386/microvm.rst F: hw/i386/microvm.c F: include/hw/i386/microvm.h F: pc-bios/bios-microvm.bin @@ -1633,11 +1630,13 @@ F: pc-bios/bios-microvm.bin Machine core M: Eduardo Habkost M: Marcel Apfelbaum +R: Philippe Mathieu-Daudé S: Supported F: cpu.c F: hw/core/cpu.c F: 
hw/core/machine-qmp-cmds.c F: hw/core/machine.c +F: hw/core/machine-smp.c F: hw/core/null-machine.c F: hw/core/numa.c F: hw/cpu/cluster.c @@ -1647,6 +1646,7 @@ F: include/hw/boards.h F: include/hw/core/cpu.h F: include/hw/cpu/cluster.h F: include/sysemu/numa.h +F: tests/unit/test-smp-parse.c T: git https://gitlab.com/ehabkost/qemu.git machine-next Xtensa Machines @@ -1785,9 +1785,8 @@ F: include/hw/acpi/ghes.h F: docs/specs/acpi_hest_ghes.rst ppc4xx -M: David Gibson L: qemu-ppc@nongnu.org -S: Odd Fixes +S: Orphan F: hw/ppc/ppc4*.c F: hw/i2c/ppc4xx_i2c.c F: include/hw/ppc/ppc4xx.h @@ -1879,7 +1878,6 @@ F: docs/igd-assign.txt F: docs/devel/vfio-migration.rst vfio-ccw -M: Cornelia Huck M: Eric Farman M: Matthew Rosato S: Supported @@ -1887,7 +1885,6 @@ F: hw/vfio/ccw.c F: hw/s390x/s390-ccw.c F: include/hw/s390x/s390-ccw.h F: include/hw/s390x/vfio-ccw.h -T: git https://gitlab.com/cohuck/qemu.git s390-next L: qemu-s390x@nongnu.org vfio-ap @@ -2123,7 +2120,7 @@ M: Alex Bennée S: Maintained F: hw/core/guest-loader.c F: docs/system/guest-loader.rst -F: tests/acceptance/boot_xen.py +F: tests/avocado/boot_xen.py Intel Hexadecimal Object File Loader M: Su Hang @@ -2242,8 +2239,6 @@ T: git https://github.com/philmd/qemu.git fw_cfg-next XIVE M: Cédric Le Goater -R: David Gibson -R: Greg Kurz L: qemu-ppc@nongnu.org S: Supported F: hw/*/*xive* @@ -2279,6 +2274,26 @@ F: net/can/* F: hw/net/can/* F: include/net/can_*.h +OpenPIC interrupt controller +M: Mark Cave-Ayland +S: Odd Fixes +F: hw/intc/openpic.c +F: include/hw/ppc/openpic.h + +MIPS CPS +M: Philippe Mathieu-Daudé +S: Odd Fixes +F: hw/misc/mips_* +F: include/hw/misc/mips_* + +MIPS GIC +M: Philippe Mathieu-Daudé +S: Odd Fixes +F: hw/intc/mips_gic.c +F: hw/timer/mips_gictimer.c +F: include/hw/intc/mips_gic.h +F: include/hw/timer/mips_gictimer.h + Subsystems ---------- Overall Audio backends @@ -2297,11 +2312,13 @@ F: qapi/audio.json ALSA Audio backend M: Gerd Hoffmann +R: Christian Schoenebeck S: Odd Fixes F: audio/alsaaudio.c Core Audio framework backend M: Gerd Hoffmann +R: Christian Schoenebeck S: Odd Fixes F: audio/coreaudio.c @@ -2312,6 +2329,7 @@ F: audio/dsound* JACK Audio Connection Kit backend M: Gerd Hoffmann +R: Christian Schoenebeck S: Odd Fixes F: audio/jackaudio.c @@ -2327,6 +2345,7 @@ F: audio/paaudio.c SDL Audio backend M: Gerd Hoffmann +R: Thomas Huth S: Odd Fixes F: audio/sdlaudio.c @@ -2515,6 +2534,7 @@ Memory API M: Paolo Bonzini M: Peter Xu M: David Hildenbrand +R: Philippe Mathieu-Daudé S: Supported F: include/exec/ioport.h F: include/exec/memop.h @@ -2974,9 +2994,9 @@ F: net/filter-replay.c F: include/sysemu/replay.h F: docs/replay.txt F: stubs/replay.c -F: tests/acceptance/replay_kernel.py -F: tests/acceptance/replay_linux.py -F: tests/acceptance/reverse_debugging.py +F: tests/avocado/replay_kernel.py +F: tests/avocado/replay_linux.py +F: tests/avocado/reverse_debugging.py F: qapi/replay.json IOVA Tree @@ -3093,7 +3113,7 @@ S: Maintained F: docs/devel/tcg-plugins.rst F: plugins/ F: tests/plugin/ -F: tests/acceptance/tcg_plugins.py +F: tests/avocado/tcg_plugins.py F: contrib/plugins/ AArch64 TCG target @@ -3482,14 +3502,14 @@ S: Maintained F: tests/tcg/Makefile F: tests/tcg/Makefile.include -Acceptance (Integration) Testing with the Avocado framework +Integration Testing with the Avocado framework W: https://trello.com/b/6Qi1pxVn/avocado-qemu R: Cleber Rosa R: Philippe Mathieu-Daudé R: Wainer dos Santos Moschetta R: Willian Rampazzo S: Odd Fixes -F: tests/acceptance/ +F: tests/avocado/ Documentation ------------- diff --git 
a/Makefile b/Makefile index 401c623a65..74c5b46d38 100644 --- a/Makefile +++ b/Makefile @@ -87,7 +87,7 @@ x := $(shell rm -rf meson-private meson-info meson-logs) endif # 1. ensure config-host.mak is up-to-date -config-host.mak: $(SRC_PATH)/configure $(SRC_PATH)/pc-bios $(SRC_PATH)/VERSION +config-host.mak: $(SRC_PATH)/configure $(SRC_PATH)/scripts/meson-buildoptions.sh $(SRC_PATH)/pc-bios $(SRC_PATH)/VERSION @echo config-host.mak is out-of-date, running configure @if test -f meson-private/coredata.dat; then \ ./config.status --skip-meson; \ @@ -124,6 +124,12 @@ ifneq ($(MESON),) Makefile.mtest: build.ninja scripts/mtest2make.py $(MESON) introspect --targets --tests --benchmarks | $(PYTHON) scripts/mtest2make.py > $@ -include Makefile.mtest + +.PHONY: update-buildoptions +all update-buildoptions: $(SRC_PATH)/scripts/meson-buildoptions.sh +$(SRC_PATH)/scripts/meson-buildoptions.sh: $(SRC_PATH)/meson_options.txt + $(MESON) introspect --buildoptions $(SRC_PATH)/meson.build | $(PYTHON) \ + scripts/meson-buildoptions.py > $@.tmp && mv $@.tmp $@ endif # 4. Rules to bridge to other makefiles @@ -229,7 +235,8 @@ distclean: clean rm -f linux-headers/asm rm -Rf .sdk -find-src-path = find "$(SRC_PATH)/" -path "$(SRC_PATH)/meson" -prune -o \( -name "*.[chsS]" -o -name "*.[ch].inc" \) +find-src-path = find "$(SRC_PATH)" -path "$(SRC_PATH)/meson" -prune -o \ + -type l -prune -o \( -name "*.[chsS]" -o -name "*.[ch].inc" \) .PHONY: ctags ctags: @@ -250,7 +257,7 @@ gtags: "GTAGS", "Remove old $@ files") $(call quiet-command, \ (cd $(SRC_PATH) && \ - $(find-src-path) | gtags -f -), \ + $(find-src-path) -print | gtags -f -), \ "GTAGS", "Re-index $(SRC_PATH)") .PHONY: TAGS diff --git a/README.qemu.rst b/README.qemu.rst index 79b19f1481..23795b8377 100644 --- a/README.qemu.rst +++ b/README.qemu.rst @@ -59,9 +59,9 @@ of other UNIX targets. The simple steps to build QEMU are: Additional information can also be found online via the QEMU website: -* ``_ -* ``_ -* ``_ +* ``_ +* ``_ +* ``_ Submitting patches @@ -84,8 +84,8 @@ the Developers Guide. Additional information on submitting patches can be found online via the QEMU website -* ``_ -* ``_ +* ``_ +* ``_ The QEMU website is also maintained under source control. @@ -144,7 +144,7 @@ reported via GitLab. 
For additional information on bug reporting consult: -* ``_ +* ``_ ChangeLog @@ -168,4 +168,4 @@ main methods being email and IRC Information on additional methods of contacting the community can be found online via the QEMU website: -* ``_ +* ``_ diff --git a/VERSION b/VERSION index 0ad6cf7fe6..8de48381d2 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -6.1.50 +6.1.90 diff --git a/accel/hvf/hvf-accel-ops.c b/accel/hvf/hvf-accel-ops.c index 93976f4ece..54457c76c2 100644 --- a/accel/hvf/hvf-accel-ops.c +++ b/accel/hvf/hvf-accel-ops.c @@ -122,6 +122,7 @@ static void hvf_set_phys_mem(MemoryRegionSection *section, bool add) MemoryRegion *area = section->mr; bool writeable = !area->readonly && !area->rom_device; hv_memory_flags_t flags; + uint64_t page_size = qemu_real_host_page_size; if (!memory_region_is_ram(area)) { if (writeable) { @@ -135,6 +136,12 @@ static void hvf_set_phys_mem(MemoryRegionSection *section, bool add) } } + if (!QEMU_IS_ALIGNED(int128_get64(section->size), page_size) || + !QEMU_IS_ALIGNED(section->offset_within_address_space, page_size)) { + /* Not page aligned, so we can not map as RAM */ + add = false; + } + mem = hvf_find_overlap_slot( section->offset_within_address_space, int128_get64(section->size)); @@ -295,6 +302,7 @@ static void hvf_region_del(MemoryListener *listener, } static MemoryListener hvf_memory_listener = { + .name = "hvf", .priority = 10, .region_add = hvf_region_add, .region_del = hvf_region_del, @@ -320,7 +328,7 @@ static int hvf_accel_init(MachineState *ms) s = g_new0(HVFState, 1); - s->num_slots = 32; + s->num_slots = ARRAY_SIZE(s->slots); for (x = 0; x < s->num_slots; ++x) { s->slots[x].size = 0; s->slots[x].slot_id = x; diff --git a/accel/kvm/kvm-all.c b/accel/kvm/kvm-all.c index cace5ffe64..eecd8031cf 100644 --- a/accel/kvm/kvm-all.c +++ b/accel/kvm/kvm-all.c @@ -469,6 +469,7 @@ int kvm_init_vcpu(CPUState *cpu, Error **errp) cpu->kvm_fd = ret; cpu->kvm_state = s; cpu->vcpu_dirty = true; + cpu->dirty_pages = 0; mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0); if (mmap_size < 0) { @@ -743,6 +744,7 @@ static uint32_t kvm_dirty_ring_reap_one(KVMState *s, CPUState *cpu) count++; } cpu->kvm_fetch_index = fetch; + cpu->dirty_pages += count; return count; } @@ -1129,6 +1131,7 @@ static void kvm_coalesce_pio_del(MemoryListener *listener, } static MemoryListener kvm_coalesced_pio_listener = { + .name = "kvm-coalesced-pio", .coalesced_io_add = kvm_coalesce_pio_add, .coalesced_io_del = kvm_coalesce_pio_del, }; @@ -1633,7 +1636,7 @@ static void kvm_io_ioeventfd_del(MemoryListener *listener, } void kvm_memory_listener_register(KVMState *s, KVMMemoryListener *kml, - AddressSpace *as, int as_id) + AddressSpace *as, int as_id, const char *name) { int i; @@ -1649,6 +1652,7 @@ void kvm_memory_listener_register(KVMState *s, KVMMemoryListener *kml, kml->listener.log_start = kvm_log_start; kml->listener.log_stop = kvm_log_stop; kml->listener.priority = 10; + kml->listener.name = name; if (s->kvm_dirty_ring_size) { kml->listener.log_sync_global = kvm_log_sync_global; @@ -1669,6 +1673,7 @@ void kvm_memory_listener_register(KVMState *s, KVMMemoryListener *kml, } static MemoryListener kvm_io_listener = { + .name = "kvm-io", .eventfd_add = kvm_io_ioeventfd_add, .eventfd_del = kvm_io_ioeventfd_del, .priority = 10, @@ -2293,6 +2298,11 @@ bool kvm_vcpu_id_is_valid(int vcpu_id) return vcpu_id >= 0 && vcpu_id < kvm_max_vcpu_id(s); } +bool kvm_dirty_ring_enabled(void) +{ + return kvm_state->kvm_dirty_ring_size ? 
true : false; +} + static int kvm_init(MachineState *ms) { MachineClass *mc = MACHINE_GET_CLASS(ms); @@ -2579,7 +2589,7 @@ static int kvm_init(MachineState *ms) s->memory_listener.listener.coalesced_io_del = kvm_uncoalesce_mmio_region; kvm_memory_listener_register(s, &s->memory_listener, - &address_space_memory, 0); + &address_space_memory, 0, "kvm-memory"); if (kvm_eventfds_allowed) { memory_listener_register(&kvm_io_listener, &address_space_io); diff --git a/accel/kvm/meson.build b/accel/kvm/meson.build index 8d219bea50..397a1fe1fd 100644 --- a/accel/kvm/meson.build +++ b/accel/kvm/meson.build @@ -3,6 +3,5 @@ kvm_ss.add(files( 'kvm-all.c', 'kvm-accel-ops.c', )) -kvm_ss.add(when: 'CONFIG_SEV', if_false: files('sev-stub.c')) specific_ss.add_all(when: 'CONFIG_KVM', if_true: kvm_ss) diff --git a/accel/stubs/kvm-stub.c b/accel/stubs/kvm-stub.c index 5b1d00a222..5319573e00 100644 --- a/accel/stubs/kvm-stub.c +++ b/accel/stubs/kvm-stub.c @@ -147,4 +147,9 @@ bool kvm_arm_supports_user_irq(void) { return false; } + +bool kvm_dirty_ring_enabled(void) +{ + return false; +} #endif diff --git a/accel/tcg/atomic_common.c.inc b/accel/tcg/atomic_common.c.inc index 6c0339f610..1df1f243e9 100644 --- a/accel/tcg/atomic_common.c.inc +++ b/accel/tcg/atomic_common.c.inc @@ -13,56 +13,43 @@ * See the COPYING file in the top-level directory. */ -static uint16_t atomic_trace_rmw_pre(CPUArchState *env, target_ulong addr, - TCGMemOpIdx oi) +static void atomic_trace_rmw_pre(CPUArchState *env, target_ulong addr, + MemOpIdx oi) { CPUState *cpu = env_cpu(env); - uint16_t info = trace_mem_get_info(get_memop(oi), get_mmuidx(oi), false); - trace_guest_mem_before_exec(cpu, addr, info); - trace_guest_mem_before_exec(cpu, addr, info | TRACE_MEM_ST); - - return info; + trace_guest_rmw_before_exec(cpu, addr, oi); } static void atomic_trace_rmw_post(CPUArchState *env, target_ulong addr, - uint16_t info) + MemOpIdx oi) { - qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, info); - qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, info | TRACE_MEM_ST); + qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_RW); } #if HAVE_ATOMIC128 -static uint16_t atomic_trace_ld_pre(CPUArchState *env, target_ulong addr, - TCGMemOpIdx oi) +static void atomic_trace_ld_pre(CPUArchState *env, target_ulong addr, + MemOpIdx oi) { - uint16_t info = trace_mem_get_info(get_memop(oi), get_mmuidx(oi), false); - - trace_guest_mem_before_exec(env_cpu(env), addr, info); - - return info; + trace_guest_ld_before_exec(env_cpu(env), addr, oi); } static void atomic_trace_ld_post(CPUArchState *env, target_ulong addr, - uint16_t info) + MemOpIdx oi) { - qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, info); + qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R); } -static uint16_t atomic_trace_st_pre(CPUArchState *env, target_ulong addr, - TCGMemOpIdx oi) +static void atomic_trace_st_pre(CPUArchState *env, target_ulong addr, + MemOpIdx oi) { - uint16_t info = trace_mem_get_info(get_memop(oi), get_mmuidx(oi), true); - - trace_guest_mem_before_exec(env_cpu(env), addr, info); - - return info; + trace_guest_st_before_exec(env_cpu(env), addr, oi); } static void atomic_trace_st_post(CPUArchState *env, target_ulong addr, - uint16_t info) + MemOpIdx oi) { - qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, info); + qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W); } #endif diff --git a/accel/tcg/atomic_template.h b/accel/tcg/atomic_template.h index 8098a1be31..2d917b6b1f 100644 --- a/accel/tcg/atomic_template.h +++ b/accel/tcg/atomic_template.h @@ 
-19,7 +19,6 @@ */ #include "qemu/plugin.h" -#include "trace/mem.h" #if DATA_SIZE == 16 # define SUFFIX o @@ -72,77 +71,77 @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr, ABI_TYPE cmpv, ABI_TYPE newv, - TCGMemOpIdx oi, uintptr_t retaddr) + MemOpIdx oi, uintptr_t retaddr) { DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, PAGE_READ | PAGE_WRITE, retaddr); DATA_TYPE ret; - uint16_t info = atomic_trace_rmw_pre(env, addr, oi); + atomic_trace_rmw_pre(env, addr, oi); #if DATA_SIZE == 16 ret = atomic16_cmpxchg(haddr, cmpv, newv); #else ret = qatomic_cmpxchg__nocheck(haddr, cmpv, newv); #endif ATOMIC_MMU_CLEANUP; - atomic_trace_rmw_post(env, addr, info); + atomic_trace_rmw_post(env, addr, oi); return ret; } #if DATA_SIZE >= 16 #if HAVE_ATOMIC128 ABI_TYPE ATOMIC_NAME(ld)(CPUArchState *env, target_ulong addr, - TCGMemOpIdx oi, uintptr_t retaddr) + MemOpIdx oi, uintptr_t retaddr) { DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, PAGE_READ, retaddr); DATA_TYPE val; - uint16_t info = atomic_trace_ld_pre(env, addr, oi); + atomic_trace_ld_pre(env, addr, oi); val = atomic16_read(haddr); ATOMIC_MMU_CLEANUP; - atomic_trace_ld_post(env, addr, info); + atomic_trace_ld_post(env, addr, oi); return val; } void ATOMIC_NAME(st)(CPUArchState *env, target_ulong addr, ABI_TYPE val, - TCGMemOpIdx oi, uintptr_t retaddr) + MemOpIdx oi, uintptr_t retaddr) { DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, PAGE_WRITE, retaddr); - uint16_t info = atomic_trace_st_pre(env, addr, oi); + atomic_trace_st_pre(env, addr, oi); atomic16_set(haddr, val); ATOMIC_MMU_CLEANUP; - atomic_trace_st_post(env, addr, info); + atomic_trace_st_post(env, addr, oi); } #endif #else ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr, ABI_TYPE val, - TCGMemOpIdx oi, uintptr_t retaddr) + MemOpIdx oi, uintptr_t retaddr) { DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, PAGE_READ | PAGE_WRITE, retaddr); DATA_TYPE ret; - uint16_t info = atomic_trace_rmw_pre(env, addr, oi); + atomic_trace_rmw_pre(env, addr, oi); ret = qatomic_xchg__nocheck(haddr, val); ATOMIC_MMU_CLEANUP; - atomic_trace_rmw_post(env, addr, info); + atomic_trace_rmw_post(env, addr, oi); return ret; } #define GEN_ATOMIC_HELPER(X) \ ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \ - ABI_TYPE val, TCGMemOpIdx oi, uintptr_t retaddr) \ + ABI_TYPE val, MemOpIdx oi, uintptr_t retaddr) \ { \ DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, \ PAGE_READ | PAGE_WRITE, retaddr); \ DATA_TYPE ret; \ - uint16_t info = atomic_trace_rmw_pre(env, addr, oi); \ + atomic_trace_rmw_pre(env, addr, oi); \ ret = qatomic_##X(haddr, val); \ ATOMIC_MMU_CLEANUP; \ - atomic_trace_rmw_post(env, addr, info); \ + atomic_trace_rmw_post(env, addr, oi); \ return ret; \ } @@ -167,12 +166,12 @@ GEN_ATOMIC_HELPER(xor_fetch) */ #define GEN_ATOMIC_HELPER_FN(X, FN, XDATA_TYPE, RET) \ ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \ - ABI_TYPE xval, TCGMemOpIdx oi, uintptr_t retaddr) \ + ABI_TYPE xval, MemOpIdx oi, uintptr_t retaddr) \ { \ XDATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, \ PAGE_READ | PAGE_WRITE, retaddr); \ XDATA_TYPE cmp, old, new, val = xval; \ - uint16_t info = atomic_trace_rmw_pre(env, addr, oi); \ + atomic_trace_rmw_pre(env, addr, oi); \ smp_mb(); \ cmp = qatomic_read__nocheck(haddr); \ do { \ @@ -180,7 +179,7 @@ ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \ cmp = qatomic_cmpxchg__nocheck(haddr, old, new); \ } while (cmp != old); \ 
ATOMIC_MMU_CLEANUP; \ - atomic_trace_rmw_post(env, addr, info); \ + atomic_trace_rmw_post(env, addr, oi); \ return RET; \ } @@ -211,78 +210,78 @@ GEN_ATOMIC_HELPER_FN(umax_fetch, MAX, DATA_TYPE, new) ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr, ABI_TYPE cmpv, ABI_TYPE newv, - TCGMemOpIdx oi, uintptr_t retaddr) + MemOpIdx oi, uintptr_t retaddr) { DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, PAGE_READ | PAGE_WRITE, retaddr); DATA_TYPE ret; - uint16_t info = atomic_trace_rmw_pre(env, addr, oi); + atomic_trace_rmw_pre(env, addr, oi); #if DATA_SIZE == 16 ret = atomic16_cmpxchg(haddr, BSWAP(cmpv), BSWAP(newv)); #else ret = qatomic_cmpxchg__nocheck(haddr, BSWAP(cmpv), BSWAP(newv)); #endif ATOMIC_MMU_CLEANUP; - atomic_trace_rmw_post(env, addr, info); + atomic_trace_rmw_post(env, addr, oi); return BSWAP(ret); } #if DATA_SIZE >= 16 #if HAVE_ATOMIC128 ABI_TYPE ATOMIC_NAME(ld)(CPUArchState *env, target_ulong addr, - TCGMemOpIdx oi, uintptr_t retaddr) + MemOpIdx oi, uintptr_t retaddr) { DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, PAGE_READ, retaddr); DATA_TYPE val; - uint16_t info = atomic_trace_ld_pre(env, addr, oi); + atomic_trace_ld_pre(env, addr, oi); val = atomic16_read(haddr); ATOMIC_MMU_CLEANUP; - atomic_trace_ld_post(env, addr, info); + atomic_trace_ld_post(env, addr, oi); return BSWAP(val); } void ATOMIC_NAME(st)(CPUArchState *env, target_ulong addr, ABI_TYPE val, - TCGMemOpIdx oi, uintptr_t retaddr) + MemOpIdx oi, uintptr_t retaddr) { DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, PAGE_WRITE, retaddr); - uint16_t info = atomic_trace_st_pre(env, addr, oi); + atomic_trace_st_pre(env, addr, oi); val = BSWAP(val); atomic16_set(haddr, val); ATOMIC_MMU_CLEANUP; - atomic_trace_st_post(env, addr, info); + atomic_trace_st_post(env, addr, oi); } #endif #else ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr, ABI_TYPE val, - TCGMemOpIdx oi, uintptr_t retaddr) + MemOpIdx oi, uintptr_t retaddr) { DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, PAGE_READ | PAGE_WRITE, retaddr); ABI_TYPE ret; - uint16_t info = atomic_trace_rmw_pre(env, addr, oi); + atomic_trace_rmw_pre(env, addr, oi); ret = qatomic_xchg__nocheck(haddr, BSWAP(val)); ATOMIC_MMU_CLEANUP; - atomic_trace_rmw_post(env, addr, info); + atomic_trace_rmw_post(env, addr, oi); return BSWAP(ret); } #define GEN_ATOMIC_HELPER(X) \ ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \ - ABI_TYPE val, TCGMemOpIdx oi, uintptr_t retaddr) \ + ABI_TYPE val, MemOpIdx oi, uintptr_t retaddr) \ { \ DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, \ PAGE_READ | PAGE_WRITE, retaddr); \ DATA_TYPE ret; \ - uint16_t info = atomic_trace_rmw_pre(env, addr, oi); \ + atomic_trace_rmw_pre(env, addr, oi); \ ret = qatomic_##X(haddr, BSWAP(val)); \ ATOMIC_MMU_CLEANUP; \ - atomic_trace_rmw_post(env, addr, info); \ + atomic_trace_rmw_post(env, addr, oi); \ return BSWAP(ret); \ } @@ -304,12 +303,12 @@ GEN_ATOMIC_HELPER(xor_fetch) */ #define GEN_ATOMIC_HELPER_FN(X, FN, XDATA_TYPE, RET) \ ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \ - ABI_TYPE xval, TCGMemOpIdx oi, uintptr_t retaddr) \ + ABI_TYPE xval, MemOpIdx oi, uintptr_t retaddr) \ { \ XDATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, \ PAGE_READ | PAGE_WRITE, retaddr); \ XDATA_TYPE ldo, ldn, old, new, val = xval; \ - uint16_t info = atomic_trace_rmw_pre(env, addr, oi); \ + atomic_trace_rmw_pre(env, addr, oi); \ smp_mb(); \ ldn = qatomic_read__nocheck(haddr); \ do { \ 
@@ -317,7 +316,7 @@ ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \ ldn = qatomic_cmpxchg__nocheck(haddr, ldo, BSWAP(new)); \ } while (ldo != ldn); \ ATOMIC_MMU_CLEANUP; \ - atomic_trace_rmw_post(env, addr, info); \ + atomic_trace_rmw_post(env, addr, oi); \ return RET; \ } diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c index 52c6473f81..504342ddf2 100644 --- a/accel/tcg/cpu-exec.c +++ b/accel/tcg/cpu-exec.c @@ -20,6 +20,9 @@ #include "qemu/osdep.h" #include "qemu-common.h" #include "qemu/qemu-print.h" +#include "qapi/error.h" +#include "qapi/qapi-commands-machine.h" +#include "qapi/type-helpers.h" #include "hw/core/tcg-cpu-ops.h" #include "trace.h" #include "disas/disas.h" @@ -38,6 +41,7 @@ #include "exec/cpu-all.h" #include "sysemu/cpu-timers.h" #include "sysemu/replay.h" +#include "sysemu/tcg.h" #include "exec/helper-proto.h" #include "tb-hash.h" #include "tb-context.h" @@ -383,6 +387,17 @@ cpu_tb_exec(CPUState *cpu, TranslationBlock *itb, int *tb_exit) cc->set_pc(cpu, last_tb->pc); } } + + /* + * If gdb single-step, and we haven't raised another exception, + * raise a debug exception. Single-step with another exception + * is handled in cpu_handle_exception. + */ + if (unlikely(cpu->singlestep_enabled) && cpu->exception_index == -1) { + cpu->exception_index = EXCP_DEBUG; + cpu_loop_exit(cpu); + } + return last_tb; } @@ -451,6 +466,7 @@ void cpu_exec_step_atomic(CPUState *cpu) * memory. */ #ifndef CONFIG_SOFTMMU + clear_helper_retaddr(); tcg_debug_assert(!have_mmap_lock()); #endif if (qemu_mutex_iothread_locked()) { @@ -460,7 +476,6 @@ void cpu_exec_step_atomic(CPUState *cpu) qemu_plugin_disable_mem_helpers(cpu); } - /* * As we start the exclusive region before codegen we must still * be in the region if we longjump out of either the codegen or @@ -925,6 +940,7 @@ int cpu_exec(CPUState *cpu) #endif #ifndef CONFIG_SOFTMMU + clear_helper_retaddr(); tcg_debug_assert(!have_mmap_lock()); #endif if (qemu_mutex_iothread_locked()) { @@ -1057,23 +1073,52 @@ void tcg_exec_unrealizefn(CPUState *cpu) #ifndef CONFIG_USER_ONLY -void dump_drift_info(void) +void dump_drift_info(GString *buf) { if (!icount_enabled()) { return; } - qemu_printf("Host - Guest clock %"PRIi64" ms\n", - (cpu_get_clock() - icount_get()) / SCALE_MS); + g_string_append_printf(buf, "Host - Guest clock %"PRIi64" ms\n", + (cpu_get_clock() - icount_get()) / SCALE_MS); if (icount_align_option) { - qemu_printf("Max guest delay %"PRIi64" ms\n", - -max_delay / SCALE_MS); - qemu_printf("Max guest advance %"PRIi64" ms\n", - max_advance / SCALE_MS); + g_string_append_printf(buf, "Max guest delay %"PRIi64" ms\n", + -max_delay / SCALE_MS); + g_string_append_printf(buf, "Max guest advance %"PRIi64" ms\n", + max_advance / SCALE_MS); } else { - qemu_printf("Max guest delay NA\n"); - qemu_printf("Max guest advance NA\n"); + g_string_append_printf(buf, "Max guest delay NA\n"); + g_string_append_printf(buf, "Max guest advance NA\n"); } } +HumanReadableText *qmp_x_query_jit(Error **errp) +{ + g_autoptr(GString) buf = g_string_new(""); + + if (!tcg_enabled()) { + error_setg(errp, "JIT information is only available with accel=tcg"); + return NULL; + } + + dump_exec_info(buf); + dump_drift_info(buf); + + return human_readable_text_from_str(buf); +} + +HumanReadableText *qmp_x_query_opcount(Error **errp) +{ + g_autoptr(GString) buf = g_string_new(""); + + if (!tcg_enabled()) { + error_setg(errp, "Opcode count information is only available with accel=tcg"); + return NULL; + } + + dump_opcount_info(buf); + + return 
human_readable_text_from_str(buf); +} + #endif /* !CONFIG_USER_ONLY */ diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c index b1e5471f94..b69a953447 100644 --- a/accel/tcg/cputlb.c +++ b/accel/tcg/cputlb.c @@ -34,12 +34,12 @@ #include "qemu/atomic128.h" #include "exec/translate-all.h" #include "trace/trace-root.h" -#include "trace/mem.h" #include "tb-hash.h" #include "internal.h" #ifdef CONFIG_PLUGIN #include "qemu/plugin-memory.h" #endif +#include "tcg/tcg-ldst.h" /* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */ /* #define DEBUG_TLB */ @@ -1749,7 +1749,7 @@ bool tlb_plugin_lookup(CPUState *cpu, target_ulong addr, int mmu_idx, * @prot may be PAGE_READ, PAGE_WRITE, or PAGE_READ|PAGE_WRITE. */ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr, - TCGMemOpIdx oi, int size, int prot, + MemOpIdx oi, int size, int prot, uintptr_t retaddr) { size_t mmu_idx = get_mmuidx(oi); @@ -1840,6 +1840,25 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr, cpu_loop_exit_atomic(env_cpu(env), retaddr); } +/* + * Verify that we have passed the correct MemOp to the correct function. + * + * In the case of the helper_*_mmu functions, we will have done this by + * using the MemOp to look up the helper during code generation. + * + * In the case of the cpu_*_mmu functions, this is up to the caller. + * We could present one function to target code, and dispatch based on + * the MemOp, but so far we have worked hard to avoid an indirect function + * call along the memory path. + */ +static void validate_memop(MemOpIdx oi, MemOp expected) +{ +#ifdef CONFIG_DEBUG_TCG + MemOp have = get_memop(oi) & (MO_SIZE | MO_BSWAP); + assert(have == expected); +#endif +} + /* * Load Helpers * @@ -1850,7 +1869,7 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr, */ typedef uint64_t FullLoadHelper(CPUArchState *env, target_ulong addr, - TCGMemOpIdx oi, uintptr_t retaddr); + MemOpIdx oi, uintptr_t retaddr); static inline uint64_t QEMU_ALWAYS_INLINE load_memop(const void *haddr, MemOp op) @@ -1876,7 +1895,7 @@ load_memop(const void *haddr, MemOp op) } static inline uint64_t QEMU_ALWAYS_INLINE -load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi, +load_helper(CPUArchState *env, target_ulong addr, MemOpIdx oi, uintptr_t retaddr, MemOp op, bool code_read, FullLoadHelper *full_load) { @@ -1991,79 +2010,86 @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi, */ static uint64_t full_ldub_mmu(CPUArchState *env, target_ulong addr, - TCGMemOpIdx oi, uintptr_t retaddr) + MemOpIdx oi, uintptr_t retaddr) { + validate_memop(oi, MO_UB); return load_helper(env, addr, oi, retaddr, MO_UB, false, full_ldub_mmu); } tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr, - TCGMemOpIdx oi, uintptr_t retaddr) + MemOpIdx oi, uintptr_t retaddr) { return full_ldub_mmu(env, addr, oi, retaddr); } static uint64_t full_le_lduw_mmu(CPUArchState *env, target_ulong addr, - TCGMemOpIdx oi, uintptr_t retaddr) + MemOpIdx oi, uintptr_t retaddr) { + validate_memop(oi, MO_LEUW); return load_helper(env, addr, oi, retaddr, MO_LEUW, false, full_le_lduw_mmu); } tcg_target_ulong helper_le_lduw_mmu(CPUArchState *env, target_ulong addr, - TCGMemOpIdx oi, uintptr_t retaddr) + MemOpIdx oi, uintptr_t retaddr) { return full_le_lduw_mmu(env, addr, oi, retaddr); } static uint64_t full_be_lduw_mmu(CPUArchState *env, target_ulong addr, - TCGMemOpIdx oi, uintptr_t retaddr) + MemOpIdx oi, uintptr_t retaddr) { + validate_memop(oi, MO_BEUW); return 
load_helper(env, addr, oi, retaddr, MO_BEUW, false, full_be_lduw_mmu); } tcg_target_ulong helper_be_lduw_mmu(CPUArchState *env, target_ulong addr, - TCGMemOpIdx oi, uintptr_t retaddr) + MemOpIdx oi, uintptr_t retaddr) { return full_be_lduw_mmu(env, addr, oi, retaddr); } static uint64_t full_le_ldul_mmu(CPUArchState *env, target_ulong addr, - TCGMemOpIdx oi, uintptr_t retaddr) + MemOpIdx oi, uintptr_t retaddr) { + validate_memop(oi, MO_LEUL); return load_helper(env, addr, oi, retaddr, MO_LEUL, false, full_le_ldul_mmu); } tcg_target_ulong helper_le_ldul_mmu(CPUArchState *env, target_ulong addr, - TCGMemOpIdx oi, uintptr_t retaddr) + MemOpIdx oi, uintptr_t retaddr) { return full_le_ldul_mmu(env, addr, oi, retaddr); } static uint64_t full_be_ldul_mmu(CPUArchState *env, target_ulong addr, - TCGMemOpIdx oi, uintptr_t retaddr) + MemOpIdx oi, uintptr_t retaddr) { + validate_memop(oi, MO_BEUL); return load_helper(env, addr, oi, retaddr, MO_BEUL, false, full_be_ldul_mmu); } tcg_target_ulong helper_be_ldul_mmu(CPUArchState *env, target_ulong addr, - TCGMemOpIdx oi, uintptr_t retaddr) + MemOpIdx oi, uintptr_t retaddr) { return full_be_ldul_mmu(env, addr, oi, retaddr); } uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr, - TCGMemOpIdx oi, uintptr_t retaddr) + MemOpIdx oi, uintptr_t retaddr) { + validate_memop(oi, MO_LEQ); return load_helper(env, addr, oi, retaddr, MO_LEQ, false, helper_le_ldq_mmu); } uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr, - TCGMemOpIdx oi, uintptr_t retaddr) + MemOpIdx oi, uintptr_t retaddr) { + validate_memop(oi, MO_BEQ); return load_helper(env, addr, oi, retaddr, MO_BEQ, false, helper_be_ldq_mmu); } @@ -2075,31 +2101,31 @@ uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr, tcg_target_ulong helper_ret_ldsb_mmu(CPUArchState *env, target_ulong addr, - TCGMemOpIdx oi, uintptr_t retaddr) + MemOpIdx oi, uintptr_t retaddr) { return (int8_t)helper_ret_ldub_mmu(env, addr, oi, retaddr); } tcg_target_ulong helper_le_ldsw_mmu(CPUArchState *env, target_ulong addr, - TCGMemOpIdx oi, uintptr_t retaddr) + MemOpIdx oi, uintptr_t retaddr) { return (int16_t)helper_le_lduw_mmu(env, addr, oi, retaddr); } tcg_target_ulong helper_be_ldsw_mmu(CPUArchState *env, target_ulong addr, - TCGMemOpIdx oi, uintptr_t retaddr) + MemOpIdx oi, uintptr_t retaddr) { return (int16_t)helper_be_lduw_mmu(env, addr, oi, retaddr); } tcg_target_ulong helper_le_ldsl_mmu(CPUArchState *env, target_ulong addr, - TCGMemOpIdx oi, uintptr_t retaddr) + MemOpIdx oi, uintptr_t retaddr) { return (int32_t)helper_le_ldul_mmu(env, addr, oi, retaddr); } tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr, - TCGMemOpIdx oi, uintptr_t retaddr) + MemOpIdx oi, uintptr_t retaddr) { return (int32_t)helper_be_ldul_mmu(env, addr, oi, retaddr); } @@ -2109,193 +2135,56 @@ tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr, */ static inline uint64_t cpu_load_helper(CPUArchState *env, abi_ptr addr, - int mmu_idx, uintptr_t retaddr, - MemOp op, FullLoadHelper *full_load) + MemOpIdx oi, uintptr_t retaddr, + FullLoadHelper *full_load) { - uint16_t meminfo; - TCGMemOpIdx oi; uint64_t ret; - meminfo = trace_mem_get_info(op, mmu_idx, false); - trace_guest_mem_before_exec(env_cpu(env), addr, meminfo); - - op &= ~MO_SIGN; - oi = make_memop_idx(op, mmu_idx); + trace_guest_ld_before_exec(env_cpu(env), addr, oi); ret = full_load(env, addr, oi, retaddr); - - qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, meminfo); - + qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, 
QEMU_PLUGIN_MEM_R); return ret; } -uint32_t cpu_ldub_mmuidx_ra(CPUArchState *env, abi_ptr addr, - int mmu_idx, uintptr_t ra) +uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr addr, MemOpIdx oi, uintptr_t ra) { - return cpu_load_helper(env, addr, mmu_idx, ra, MO_UB, full_ldub_mmu); + return cpu_load_helper(env, addr, oi, ra, full_ldub_mmu); } -int cpu_ldsb_mmuidx_ra(CPUArchState *env, abi_ptr addr, - int mmu_idx, uintptr_t ra) +uint16_t cpu_ldw_be_mmu(CPUArchState *env, abi_ptr addr, + MemOpIdx oi, uintptr_t ra) { - return (int8_t)cpu_load_helper(env, addr, mmu_idx, ra, MO_SB, - full_ldub_mmu); + return cpu_load_helper(env, addr, oi, ra, full_be_lduw_mmu); } -uint32_t cpu_lduw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, - int mmu_idx, uintptr_t ra) +uint32_t cpu_ldl_be_mmu(CPUArchState *env, abi_ptr addr, + MemOpIdx oi, uintptr_t ra) { - return cpu_load_helper(env, addr, mmu_idx, ra, MO_BEUW, full_be_lduw_mmu); + return cpu_load_helper(env, addr, oi, ra, full_be_ldul_mmu); } -int cpu_ldsw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, - int mmu_idx, uintptr_t ra) +uint64_t cpu_ldq_be_mmu(CPUArchState *env, abi_ptr addr, + MemOpIdx oi, uintptr_t ra) { - return (int16_t)cpu_load_helper(env, addr, mmu_idx, ra, MO_BESW, - full_be_lduw_mmu); + return cpu_load_helper(env, addr, oi, MO_BEQ, helper_be_ldq_mmu); } -uint32_t cpu_ldl_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, - int mmu_idx, uintptr_t ra) +uint16_t cpu_ldw_le_mmu(CPUArchState *env, abi_ptr addr, + MemOpIdx oi, uintptr_t ra) { - return cpu_load_helper(env, addr, mmu_idx, ra, MO_BEUL, full_be_ldul_mmu); + return cpu_load_helper(env, addr, oi, ra, full_le_lduw_mmu); } -uint64_t cpu_ldq_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, - int mmu_idx, uintptr_t ra) +uint32_t cpu_ldl_le_mmu(CPUArchState *env, abi_ptr addr, + MemOpIdx oi, uintptr_t ra) { - return cpu_load_helper(env, addr, mmu_idx, ra, MO_BEQ, helper_be_ldq_mmu); + return cpu_load_helper(env, addr, oi, ra, full_le_ldul_mmu); } -uint32_t cpu_lduw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, - int mmu_idx, uintptr_t ra) +uint64_t cpu_ldq_le_mmu(CPUArchState *env, abi_ptr addr, + MemOpIdx oi, uintptr_t ra) { - return cpu_load_helper(env, addr, mmu_idx, ra, MO_LEUW, full_le_lduw_mmu); -} - -int cpu_ldsw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, - int mmu_idx, uintptr_t ra) -{ - return (int16_t)cpu_load_helper(env, addr, mmu_idx, ra, MO_LESW, - full_le_lduw_mmu); -} - -uint32_t cpu_ldl_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, - int mmu_idx, uintptr_t ra) -{ - return cpu_load_helper(env, addr, mmu_idx, ra, MO_LEUL, full_le_ldul_mmu); -} - -uint64_t cpu_ldq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, - int mmu_idx, uintptr_t ra) -{ - return cpu_load_helper(env, addr, mmu_idx, ra, MO_LEQ, helper_le_ldq_mmu); -} - -uint32_t cpu_ldub_data_ra(CPUArchState *env, target_ulong ptr, - uintptr_t retaddr) -{ - return cpu_ldub_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr); -} - -int cpu_ldsb_data_ra(CPUArchState *env, target_ulong ptr, uintptr_t retaddr) -{ - return cpu_ldsb_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr); -} - -uint32_t cpu_lduw_be_data_ra(CPUArchState *env, target_ulong ptr, - uintptr_t retaddr) -{ - return cpu_lduw_be_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr); -} - -int cpu_ldsw_be_data_ra(CPUArchState *env, target_ulong ptr, uintptr_t retaddr) -{ - return cpu_ldsw_be_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr); -} - -uint32_t cpu_ldl_be_data_ra(CPUArchState *env, target_ulong ptr, - uintptr_t retaddr) -{ - return 
cpu_ldl_be_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr); -} - -uint64_t cpu_ldq_be_data_ra(CPUArchState *env, target_ulong ptr, - uintptr_t retaddr) -{ - return cpu_ldq_be_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr); -} - -uint32_t cpu_lduw_le_data_ra(CPUArchState *env, target_ulong ptr, - uintptr_t retaddr) -{ - return cpu_lduw_le_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr); -} - -int cpu_ldsw_le_data_ra(CPUArchState *env, target_ulong ptr, uintptr_t retaddr) -{ - return cpu_ldsw_le_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr); -} - -uint32_t cpu_ldl_le_data_ra(CPUArchState *env, target_ulong ptr, - uintptr_t retaddr) -{ - return cpu_ldl_le_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr); -} - -uint64_t cpu_ldq_le_data_ra(CPUArchState *env, target_ulong ptr, - uintptr_t retaddr) -{ - return cpu_ldq_le_mmuidx_ra(env, ptr, cpu_mmu_index(env, false), retaddr); -} - -uint32_t cpu_ldub_data(CPUArchState *env, target_ulong ptr) -{ - return cpu_ldub_data_ra(env, ptr, 0); -} - -int cpu_ldsb_data(CPUArchState *env, target_ulong ptr) -{ - return cpu_ldsb_data_ra(env, ptr, 0); -} - -uint32_t cpu_lduw_be_data(CPUArchState *env, target_ulong ptr) -{ - return cpu_lduw_be_data_ra(env, ptr, 0); -} - -int cpu_ldsw_be_data(CPUArchState *env, target_ulong ptr) -{ - return cpu_ldsw_be_data_ra(env, ptr, 0); -} - -uint32_t cpu_ldl_be_data(CPUArchState *env, target_ulong ptr) -{ - return cpu_ldl_be_data_ra(env, ptr, 0); -} - -uint64_t cpu_ldq_be_data(CPUArchState *env, target_ulong ptr) -{ - return cpu_ldq_be_data_ra(env, ptr, 0); -} - -uint32_t cpu_lduw_le_data(CPUArchState *env, target_ulong ptr) -{ - return cpu_lduw_le_data_ra(env, ptr, 0); -} - -int cpu_ldsw_le_data(CPUArchState *env, target_ulong ptr) -{ - return cpu_ldsw_le_data_ra(env, ptr, 0); -} - -uint32_t cpu_ldl_le_data(CPUArchState *env, target_ulong ptr) -{ - return cpu_ldl_le_data_ra(env, ptr, 0); -} - -uint64_t cpu_ldq_le_data(CPUArchState *env, target_ulong ptr) -{ - return cpu_ldq_le_data_ra(env, ptr, 0); + return cpu_load_helper(env, addr, oi, ra, helper_le_ldq_mmu); } /* @@ -2332,6 +2221,9 @@ store_memop(void *haddr, uint64_t val, MemOp op) } } +static void full_stb_mmu(CPUArchState *env, target_ulong addr, uint64_t val, + MemOpIdx oi, uintptr_t retaddr); + static void __attribute__((noinline)) store_helper_unaligned(CPUArchState *env, target_ulong addr, uint64_t val, uintptr_t retaddr, size_t size, uintptr_t mmu_idx, @@ -2341,7 +2233,7 @@ store_helper_unaligned(CPUArchState *env, target_ulong addr, uint64_t val, uintptr_t index, index2; CPUTLBEntry *entry, *entry2; target_ulong page2, tlb_addr, tlb_addr2; - TCGMemOpIdx oi; + MemOpIdx oi; size_t size2; int i; @@ -2395,20 +2287,20 @@ store_helper_unaligned(CPUArchState *env, target_ulong addr, uint64_t val, for (i = 0; i < size; ++i) { /* Big-endian extract. */ uint8_t val8 = val >> (((size - 1) * 8) - (i * 8)); - helper_ret_stb_mmu(env, addr + i, val8, oi, retaddr); + full_stb_mmu(env, addr + i, val8, oi, retaddr); } } else { for (i = 0; i < size; ++i) { /* Little-endian extract. 
*/ uint8_t val8 = val >> (i * 8); - helper_ret_stb_mmu(env, addr + i, val8, oi, retaddr); + full_stb_mmu(env, addr + i, val8, oi, retaddr); } } } static inline void QEMU_ALWAYS_INLINE store_helper(CPUArchState *env, target_ulong addr, uint64_t val, - TCGMemOpIdx oi, uintptr_t retaddr, MemOp op) + MemOpIdx oi, uintptr_t retaddr, MemOp op) { uintptr_t mmu_idx = get_mmuidx(oi); uintptr_t index = tlb_index(env, mmu_idx, addr); @@ -2504,46 +2396,83 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val, store_memop(haddr, val, op); } -void __attribute__((noinline)) -helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val, - TCGMemOpIdx oi, uintptr_t retaddr) +static void __attribute__((noinline)) +full_stb_mmu(CPUArchState *env, target_ulong addr, uint64_t val, + MemOpIdx oi, uintptr_t retaddr) { + validate_memop(oi, MO_UB); store_helper(env, addr, val, oi, retaddr, MO_UB); } -void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val, - TCGMemOpIdx oi, uintptr_t retaddr) +void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val, + MemOpIdx oi, uintptr_t retaddr) { + full_stb_mmu(env, addr, val, oi, retaddr); +} + +static void full_le_stw_mmu(CPUArchState *env, target_ulong addr, uint64_t val, + MemOpIdx oi, uintptr_t retaddr) +{ + validate_memop(oi, MO_LEUW); store_helper(env, addr, val, oi, retaddr, MO_LEUW); } -void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val, - TCGMemOpIdx oi, uintptr_t retaddr) +void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val, + MemOpIdx oi, uintptr_t retaddr) { + full_le_stw_mmu(env, addr, val, oi, retaddr); +} + +static void full_be_stw_mmu(CPUArchState *env, target_ulong addr, uint64_t val, + MemOpIdx oi, uintptr_t retaddr) +{ + validate_memop(oi, MO_BEUW); store_helper(env, addr, val, oi, retaddr, MO_BEUW); } -void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val, - TCGMemOpIdx oi, uintptr_t retaddr) +void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val, + MemOpIdx oi, uintptr_t retaddr) { + full_be_stw_mmu(env, addr, val, oi, retaddr); +} + +static void full_le_stl_mmu(CPUArchState *env, target_ulong addr, uint64_t val, + MemOpIdx oi, uintptr_t retaddr) +{ + validate_memop(oi, MO_LEUL); store_helper(env, addr, val, oi, retaddr, MO_LEUL); } -void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val, - TCGMemOpIdx oi, uintptr_t retaddr) +void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val, + MemOpIdx oi, uintptr_t retaddr) { + full_le_stl_mmu(env, addr, val, oi, retaddr); +} + +static void full_be_stl_mmu(CPUArchState *env, target_ulong addr, uint64_t val, + MemOpIdx oi, uintptr_t retaddr) +{ + validate_memop(oi, MO_BEUL); store_helper(env, addr, val, oi, retaddr, MO_BEUL); } -void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val, - TCGMemOpIdx oi, uintptr_t retaddr) +void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val, + MemOpIdx oi, uintptr_t retaddr) { + full_be_stl_mmu(env, addr, val, oi, retaddr); +} + +void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val, + MemOpIdx oi, uintptr_t retaddr) +{ + validate_memop(oi, MO_LEQ); store_helper(env, addr, val, oi, retaddr, MO_LEQ); } void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val, - TCGMemOpIdx oi, uintptr_t retaddr) + MemOpIdx oi, uintptr_t retaddr) { + validate_memop(oi, MO_BEQ); store_helper(env, addr, val, oi, retaddr, MO_BEQ); } @@ -2551,140 
+2480,61 @@ void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val, * Store Helpers for cpu_ldst.h */ -static inline void QEMU_ALWAYS_INLINE -cpu_store_helper(CPUArchState *env, target_ulong addr, uint64_t val, - int mmu_idx, uintptr_t retaddr, MemOp op) +typedef void FullStoreHelper(CPUArchState *env, target_ulong addr, + uint64_t val, MemOpIdx oi, uintptr_t retaddr); + +static inline void cpu_store_helper(CPUArchState *env, target_ulong addr, + uint64_t val, MemOpIdx oi, uintptr_t ra, + FullStoreHelper *full_store) { - TCGMemOpIdx oi; - uint16_t meminfo; - - meminfo = trace_mem_get_info(op, mmu_idx, true); - trace_guest_mem_before_exec(env_cpu(env), addr, meminfo); - - oi = make_memop_idx(op, mmu_idx); - store_helper(env, addr, val, oi, retaddr, op); - - qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, meminfo); + trace_guest_st_before_exec(env_cpu(env), addr, oi); + full_store(env, addr, val, oi, ra); + qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W); } -void cpu_stb_mmuidx_ra(CPUArchState *env, target_ulong addr, uint32_t val, - int mmu_idx, uintptr_t retaddr) +void cpu_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val, + MemOpIdx oi, uintptr_t retaddr) { - cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_UB); + cpu_store_helper(env, addr, val, oi, retaddr, full_stb_mmu); } -void cpu_stw_be_mmuidx_ra(CPUArchState *env, target_ulong addr, uint32_t val, - int mmu_idx, uintptr_t retaddr) +void cpu_stw_be_mmu(CPUArchState *env, target_ulong addr, uint16_t val, + MemOpIdx oi, uintptr_t retaddr) { - cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_BEUW); + cpu_store_helper(env, addr, val, oi, retaddr, full_be_stw_mmu); } -void cpu_stl_be_mmuidx_ra(CPUArchState *env, target_ulong addr, uint32_t val, - int mmu_idx, uintptr_t retaddr) +void cpu_stl_be_mmu(CPUArchState *env, target_ulong addr, uint32_t val, + MemOpIdx oi, uintptr_t retaddr) { - cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_BEUL); + cpu_store_helper(env, addr, val, oi, retaddr, full_be_stl_mmu); } -void cpu_stq_be_mmuidx_ra(CPUArchState *env, target_ulong addr, uint64_t val, - int mmu_idx, uintptr_t retaddr) +void cpu_stq_be_mmu(CPUArchState *env, target_ulong addr, uint64_t val, + MemOpIdx oi, uintptr_t retaddr) { - cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_BEQ); + cpu_store_helper(env, addr, val, oi, retaddr, helper_be_stq_mmu); } -void cpu_stw_le_mmuidx_ra(CPUArchState *env, target_ulong addr, uint32_t val, - int mmu_idx, uintptr_t retaddr) +void cpu_stw_le_mmu(CPUArchState *env, target_ulong addr, uint16_t val, + MemOpIdx oi, uintptr_t retaddr) { - cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_LEUW); + cpu_store_helper(env, addr, val, oi, retaddr, full_le_stw_mmu); } -void cpu_stl_le_mmuidx_ra(CPUArchState *env, target_ulong addr, uint32_t val, - int mmu_idx, uintptr_t retaddr) +void cpu_stl_le_mmu(CPUArchState *env, target_ulong addr, uint32_t val, + MemOpIdx oi, uintptr_t retaddr) { - cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_LEUL); + cpu_store_helper(env, addr, val, oi, retaddr, full_le_stl_mmu); } -void cpu_stq_le_mmuidx_ra(CPUArchState *env, target_ulong addr, uint64_t val, - int mmu_idx, uintptr_t retaddr) +void cpu_stq_le_mmu(CPUArchState *env, target_ulong addr, uint64_t val, + MemOpIdx oi, uintptr_t retaddr) { - cpu_store_helper(env, addr, val, mmu_idx, retaddr, MO_LEQ); + cpu_store_helper(env, addr, val, oi, retaddr, helper_le_stq_mmu); } -void cpu_stb_data_ra(CPUArchState *env, target_ulong ptr, - uint32_t val, uintptr_t 
retaddr) -{ - cpu_stb_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr); -} - -void cpu_stw_be_data_ra(CPUArchState *env, target_ulong ptr, - uint32_t val, uintptr_t retaddr) -{ - cpu_stw_be_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr); -} - -void cpu_stl_be_data_ra(CPUArchState *env, target_ulong ptr, - uint32_t val, uintptr_t retaddr) -{ - cpu_stl_be_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr); -} - -void cpu_stq_be_data_ra(CPUArchState *env, target_ulong ptr, - uint64_t val, uintptr_t retaddr) -{ - cpu_stq_be_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr); -} - -void cpu_stw_le_data_ra(CPUArchState *env, target_ulong ptr, - uint32_t val, uintptr_t retaddr) -{ - cpu_stw_le_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr); -} - -void cpu_stl_le_data_ra(CPUArchState *env, target_ulong ptr, - uint32_t val, uintptr_t retaddr) -{ - cpu_stl_le_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr); -} - -void cpu_stq_le_data_ra(CPUArchState *env, target_ulong ptr, - uint64_t val, uintptr_t retaddr) -{ - cpu_stq_le_mmuidx_ra(env, ptr, val, cpu_mmu_index(env, false), retaddr); -} - -void cpu_stb_data(CPUArchState *env, target_ulong ptr, uint32_t val) -{ - cpu_stb_data_ra(env, ptr, val, 0); -} - -void cpu_stw_be_data(CPUArchState *env, target_ulong ptr, uint32_t val) -{ - cpu_stw_be_data_ra(env, ptr, val, 0); -} - -void cpu_stl_be_data(CPUArchState *env, target_ulong ptr, uint32_t val) -{ - cpu_stl_be_data_ra(env, ptr, val, 0); -} - -void cpu_stq_be_data(CPUArchState *env, target_ulong ptr, uint64_t val) -{ - cpu_stq_be_data_ra(env, ptr, val, 0); -} - -void cpu_stw_le_data(CPUArchState *env, target_ulong ptr, uint32_t val) -{ - cpu_stw_le_data_ra(env, ptr, val, 0); -} - -void cpu_stl_le_data(CPUArchState *env, target_ulong ptr, uint32_t val) -{ - cpu_stl_le_data_ra(env, ptr, val, 0); -} - -void cpu_stq_le_data(CPUArchState *env, target_ulong ptr, uint64_t val) -{ - cpu_stq_le_data_ra(env, ptr, val, 0); -} +#include "ldst_common.c.inc" /* * First set of functions passes in OI and RETADDR. @@ -2721,49 +2571,49 @@ void cpu_stq_le_data(CPUArchState *env, target_ulong ptr, uint64_t val) /* Code access functions. 
*/ static uint64_t full_ldub_code(CPUArchState *env, target_ulong addr, - TCGMemOpIdx oi, uintptr_t retaddr) + MemOpIdx oi, uintptr_t retaddr) { return load_helper(env, addr, oi, retaddr, MO_8, true, full_ldub_code); } uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr addr) { - TCGMemOpIdx oi = make_memop_idx(MO_UB, cpu_mmu_index(env, true)); + MemOpIdx oi = make_memop_idx(MO_UB, cpu_mmu_index(env, true)); return full_ldub_code(env, addr, oi, 0); } static uint64_t full_lduw_code(CPUArchState *env, target_ulong addr, - TCGMemOpIdx oi, uintptr_t retaddr) + MemOpIdx oi, uintptr_t retaddr) { return load_helper(env, addr, oi, retaddr, MO_TEUW, true, full_lduw_code); } uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr addr) { - TCGMemOpIdx oi = make_memop_idx(MO_TEUW, cpu_mmu_index(env, true)); + MemOpIdx oi = make_memop_idx(MO_TEUW, cpu_mmu_index(env, true)); return full_lduw_code(env, addr, oi, 0); } static uint64_t full_ldl_code(CPUArchState *env, target_ulong addr, - TCGMemOpIdx oi, uintptr_t retaddr) + MemOpIdx oi, uintptr_t retaddr) { return load_helper(env, addr, oi, retaddr, MO_TEUL, true, full_ldl_code); } uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr addr) { - TCGMemOpIdx oi = make_memop_idx(MO_TEUL, cpu_mmu_index(env, true)); + MemOpIdx oi = make_memop_idx(MO_TEUL, cpu_mmu_index(env, true)); return full_ldl_code(env, addr, oi, 0); } static uint64_t full_ldq_code(CPUArchState *env, target_ulong addr, - TCGMemOpIdx oi, uintptr_t retaddr) + MemOpIdx oi, uintptr_t retaddr) { return load_helper(env, addr, oi, retaddr, MO_TEQ, true, full_ldq_code); } uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr addr) { - TCGMemOpIdx oi = make_memop_idx(MO_TEQ, cpu_mmu_index(env, true)); + MemOpIdx oi = make_memop_idx(MO_TEQ, cpu_mmu_index(env, true)); return full_ldq_code(env, addr, oi, 0); } diff --git a/accel/tcg/hmp.c b/accel/tcg/hmp.c index a6e72fdb3e..d2ea352655 100644 --- a/accel/tcg/hmp.c +++ b/accel/tcg/hmp.c @@ -1,29 +1,15 @@ #include "qemu/osdep.h" #include "qemu/error-report.h" +#include "qapi/error.h" +#include "qapi/qapi-commands-machine.h" #include "exec/exec-all.h" #include "monitor/monitor.h" #include "sysemu/tcg.h" -static void hmp_info_jit(Monitor *mon, const QDict *qdict) -{ - if (!tcg_enabled()) { - error_report("JIT information is only available with accel=tcg"); - return; - } - - dump_exec_info(); - dump_drift_info(); -} - -static void hmp_info_opcount(Monitor *mon, const QDict *qdict) -{ - dump_opcount_info(); -} - static void hmp_tcg_register(void) { - monitor_register_hmp("jit", true, hmp_info_jit); - monitor_register_hmp("opcount", true, hmp_info_opcount); + monitor_register_hmp_info_hrt("jit", qmp_x_query_jit); + monitor_register_hmp_info_hrt("opcount", qmp_x_query_opcount); } type_init(hmp_tcg_register); diff --git a/accel/tcg/ldst_common.c.inc b/accel/tcg/ldst_common.c.inc new file mode 100644 index 0000000000..bfefb275e7 --- /dev/null +++ b/accel/tcg/ldst_common.c.inc @@ -0,0 +1,307 @@ +/* + * Routines common to user and system emulation of load/store. + * + * Copyright (c) 2003 Fabrice Bellard + * + * SPDX-License-Identifier: GPL-2.0-or-later + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
+ */ + +uint32_t cpu_ldub_mmuidx_ra(CPUArchState *env, abi_ptr addr, + int mmu_idx, uintptr_t ra) +{ + MemOpIdx oi = make_memop_idx(MO_UB, mmu_idx); + return cpu_ldb_mmu(env, addr, oi, ra); +} + +int cpu_ldsb_mmuidx_ra(CPUArchState *env, abi_ptr addr, + int mmu_idx, uintptr_t ra) +{ + return (int8_t)cpu_ldub_mmuidx_ra(env, addr, mmu_idx, ra); +} + +uint32_t cpu_lduw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, + int mmu_idx, uintptr_t ra) +{ + MemOpIdx oi = make_memop_idx(MO_BEUW | MO_UNALN, mmu_idx); + return cpu_ldw_be_mmu(env, addr, oi, ra); +} + +int cpu_ldsw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, + int mmu_idx, uintptr_t ra) +{ + return (int16_t)cpu_lduw_be_mmuidx_ra(env, addr, mmu_idx, ra); +} + +uint32_t cpu_ldl_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, + int mmu_idx, uintptr_t ra) +{ + MemOpIdx oi = make_memop_idx(MO_BEUL | MO_UNALN, mmu_idx); + return cpu_ldl_be_mmu(env, addr, oi, ra); +} + +uint64_t cpu_ldq_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, + int mmu_idx, uintptr_t ra) +{ + MemOpIdx oi = make_memop_idx(MO_BEQ | MO_UNALN, mmu_idx); + return cpu_ldq_be_mmu(env, addr, oi, ra); +} + +uint32_t cpu_lduw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, + int mmu_idx, uintptr_t ra) +{ + MemOpIdx oi = make_memop_idx(MO_LEUW | MO_UNALN, mmu_idx); + return cpu_ldw_le_mmu(env, addr, oi, ra); +} + +int cpu_ldsw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, + int mmu_idx, uintptr_t ra) +{ + return (int16_t)cpu_lduw_le_mmuidx_ra(env, addr, mmu_idx, ra); +} + +uint32_t cpu_ldl_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, + int mmu_idx, uintptr_t ra) +{ + MemOpIdx oi = make_memop_idx(MO_LEUL | MO_UNALN, mmu_idx); + return cpu_ldl_le_mmu(env, addr, oi, ra); +} + +uint64_t cpu_ldq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, + int mmu_idx, uintptr_t ra) +{ + MemOpIdx oi = make_memop_idx(MO_LEQ | MO_UNALN, mmu_idx); + return cpu_ldq_le_mmu(env, addr, oi, ra); +} + +void cpu_stb_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val, + int mmu_idx, uintptr_t ra) +{ + MemOpIdx oi = make_memop_idx(MO_UB, mmu_idx); + cpu_stb_mmu(env, addr, val, oi, ra); +} + +void cpu_stw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val, + int mmu_idx, uintptr_t ra) +{ + MemOpIdx oi = make_memop_idx(MO_BEUW | MO_UNALN, mmu_idx); + cpu_stw_be_mmu(env, addr, val, oi, ra); +} + +void cpu_stl_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val, + int mmu_idx, uintptr_t ra) +{ + MemOpIdx oi = make_memop_idx(MO_BEUL | MO_UNALN, mmu_idx); + cpu_stl_be_mmu(env, addr, val, oi, ra); +} + +void cpu_stq_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint64_t val, + int mmu_idx, uintptr_t ra) +{ + MemOpIdx oi = make_memop_idx(MO_BEQ | MO_UNALN, mmu_idx); + cpu_stq_be_mmu(env, addr, val, oi, ra); +} + +void cpu_stw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val, + int mmu_idx, uintptr_t ra) +{ + MemOpIdx oi = make_memop_idx(MO_LEUW | MO_UNALN, mmu_idx); + cpu_stw_le_mmu(env, addr, val, oi, ra); +} + +void cpu_stl_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val, + int mmu_idx, uintptr_t ra) +{ + MemOpIdx oi = make_memop_idx(MO_LEUL | MO_UNALN, mmu_idx); + cpu_stl_le_mmu(env, addr, val, oi, ra); +} + +void cpu_stq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint64_t val, + int mmu_idx, uintptr_t ra) +{ + MemOpIdx oi = make_memop_idx(MO_LEQ | MO_UNALN, mmu_idx); + cpu_stq_le_mmu(env, addr, val, oi, ra); +} + +/*--------------------------*/ + +uint32_t cpu_ldub_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra) +{ + return cpu_ldub_mmuidx_ra(env, addr, 
cpu_mmu_index(env, false), ra); +} + +int cpu_ldsb_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra) +{ + return (int8_t)cpu_ldub_data_ra(env, addr, ra); +} + +uint32_t cpu_lduw_be_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra) +{ + return cpu_lduw_be_mmuidx_ra(env, addr, cpu_mmu_index(env, false), ra); +} + +int cpu_ldsw_be_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra) +{ + return (int16_t)cpu_lduw_be_data_ra(env, addr, ra); +} + +uint32_t cpu_ldl_be_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra) +{ + return cpu_ldl_be_mmuidx_ra(env, addr, cpu_mmu_index(env, false), ra); +} + +uint64_t cpu_ldq_be_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra) +{ + return cpu_ldq_be_mmuidx_ra(env, addr, cpu_mmu_index(env, false), ra); +} + +uint32_t cpu_lduw_le_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra) +{ + return cpu_lduw_le_mmuidx_ra(env, addr, cpu_mmu_index(env, false), ra); +} + +int cpu_ldsw_le_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra) +{ + return (int16_t)cpu_lduw_le_data_ra(env, addr, ra); +} + +uint32_t cpu_ldl_le_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra) +{ + return cpu_ldl_le_mmuidx_ra(env, addr, cpu_mmu_index(env, false), ra); +} + +uint64_t cpu_ldq_le_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra) +{ + return cpu_ldq_le_mmuidx_ra(env, addr, cpu_mmu_index(env, false), ra); +} + +void cpu_stb_data_ra(CPUArchState *env, abi_ptr addr, + uint32_t val, uintptr_t ra) +{ + cpu_stb_mmuidx_ra(env, addr, val, cpu_mmu_index(env, false), ra); +} + +void cpu_stw_be_data_ra(CPUArchState *env, abi_ptr addr, + uint32_t val, uintptr_t ra) +{ + cpu_stw_be_mmuidx_ra(env, addr, val, cpu_mmu_index(env, false), ra); +} + +void cpu_stl_be_data_ra(CPUArchState *env, abi_ptr addr, + uint32_t val, uintptr_t ra) +{ + cpu_stl_be_mmuidx_ra(env, addr, val, cpu_mmu_index(env, false), ra); +} + +void cpu_stq_be_data_ra(CPUArchState *env, abi_ptr addr, + uint64_t val, uintptr_t ra) +{ + cpu_stq_be_mmuidx_ra(env, addr, val, cpu_mmu_index(env, false), ra); +} + +void cpu_stw_le_data_ra(CPUArchState *env, abi_ptr addr, + uint32_t val, uintptr_t ra) +{ + cpu_stw_le_mmuidx_ra(env, addr, val, cpu_mmu_index(env, false), ra); +} + +void cpu_stl_le_data_ra(CPUArchState *env, abi_ptr addr, + uint32_t val, uintptr_t ra) +{ + cpu_stl_le_mmuidx_ra(env, addr, val, cpu_mmu_index(env, false), ra); +} + +void cpu_stq_le_data_ra(CPUArchState *env, abi_ptr addr, + uint64_t val, uintptr_t ra) +{ + cpu_stq_le_mmuidx_ra(env, addr, val, cpu_mmu_index(env, false), ra); +} + +/*--------------------------*/ + +uint32_t cpu_ldub_data(CPUArchState *env, abi_ptr addr) +{ + return cpu_ldub_data_ra(env, addr, 0); +} + +int cpu_ldsb_data(CPUArchState *env, abi_ptr addr) +{ + return (int8_t)cpu_ldub_data(env, addr); +} + +uint32_t cpu_lduw_be_data(CPUArchState *env, abi_ptr addr) +{ + return cpu_lduw_be_data_ra(env, addr, 0); +} + +int cpu_ldsw_be_data(CPUArchState *env, abi_ptr addr) +{ + return (int16_t)cpu_lduw_be_data(env, addr); +} + +uint32_t cpu_ldl_be_data(CPUArchState *env, abi_ptr addr) +{ + return cpu_ldl_be_data_ra(env, addr, 0); +} + +uint64_t cpu_ldq_be_data(CPUArchState *env, abi_ptr addr) +{ + return cpu_ldq_be_data_ra(env, addr, 0); +} + +uint32_t cpu_lduw_le_data(CPUArchState *env, abi_ptr addr) +{ + return cpu_lduw_le_data_ra(env, addr, 0); +} + +int cpu_ldsw_le_data(CPUArchState *env, abi_ptr addr) +{ + return (int16_t)cpu_lduw_le_data(env, addr); +} + +uint32_t cpu_ldl_le_data(CPUArchState *env, abi_ptr addr) +{ + return cpu_ldl_le_data_ra(env, addr, 0); 
+} + +uint64_t cpu_ldq_le_data(CPUArchState *env, abi_ptr addr) +{ + return cpu_ldq_le_data_ra(env, addr, 0); +} + +void cpu_stb_data(CPUArchState *env, abi_ptr addr, uint32_t val) +{ + cpu_stb_data_ra(env, addr, val, 0); +} + +void cpu_stw_be_data(CPUArchState *env, abi_ptr addr, uint32_t val) +{ + cpu_stw_be_data_ra(env, addr, val, 0); +} + +void cpu_stl_be_data(CPUArchState *env, abi_ptr addr, uint32_t val) +{ + cpu_stl_be_data_ra(env, addr, val, 0); +} + +void cpu_stq_be_data(CPUArchState *env, abi_ptr addr, uint64_t val) +{ + cpu_stq_be_data_ra(env, addr, val, 0); +} + +void cpu_stw_le_data(CPUArchState *env, abi_ptr addr, uint32_t val) +{ + cpu_stw_le_data_ra(env, addr, val, 0); +} + +void cpu_stl_le_data(CPUArchState *env, abi_ptr addr, uint32_t val) +{ + cpu_stl_le_data_ra(env, addr, val, 0); +} + +void cpu_stq_le_data(CPUArchState *env, abi_ptr addr, uint64_t val) +{ + cpu_stq_le_data_ra(env, addr, val, 0); +} diff --git a/accel/tcg/plugin-gen.c b/accel/tcg/plugin-gen.c index 88e25c6df9..22d95fe1c3 100644 --- a/accel/tcg/plugin-gen.c +++ b/accel/tcg/plugin-gen.c @@ -45,7 +45,6 @@ #include "qemu/osdep.h" #include "tcg/tcg.h" #include "tcg/tcg-op.h" -#include "trace/mem.h" #include "exec/exec-all.h" #include "exec/plugin-gen.h" #include "exec/translator.h" @@ -163,11 +162,7 @@ static void gen_empty_mem_helper(void) static void gen_plugin_cb_start(enum plugin_gen_from from, enum plugin_gen_cb type, unsigned wr) { - TCGOp *op; - tcg_gen_plugin_cb_start(from, type, wr); - op = tcg_last_op(); - QSIMPLEQ_INSERT_TAIL(&tcg_ctx->plugin_ops, op, plugin_link); } static void gen_wrapped(enum plugin_gen_from from, @@ -211,9 +206,9 @@ static void gen_mem_wrapped(enum plugin_gen_cb type, const union mem_gen_fn *f, TCGv addr, uint32_t info, bool is_mem) { - int wr = !!(info & TRACE_MEM_ST); + enum qemu_plugin_mem_rw rw = get_plugin_meminfo_rw(info); - gen_plugin_cb_start(PLUGIN_GEN_FROM_MEM, type, wr); + gen_plugin_cb_start(PLUGIN_GEN_FROM_MEM, type, rw); if (is_mem) { f->mem_fn(addr, info); } else { @@ -707,62 +702,6 @@ static void plugin_gen_disable_mem_helper(const struct qemu_plugin_tb *ptb, inject_mem_disable_helper(insn, begin_op); } -static void plugin_inject_cb(const struct qemu_plugin_tb *ptb, TCGOp *begin_op, - int insn_idx) -{ - enum plugin_gen_from from = begin_op->args[0]; - enum plugin_gen_cb type = begin_op->args[1]; - - switch (from) { - case PLUGIN_GEN_FROM_TB: - switch (type) { - case PLUGIN_GEN_CB_UDATA: - plugin_gen_tb_udata(ptb, begin_op); - return; - case PLUGIN_GEN_CB_INLINE: - plugin_gen_tb_inline(ptb, begin_op); - return; - default: - g_assert_not_reached(); - } - case PLUGIN_GEN_FROM_INSN: - switch (type) { - case PLUGIN_GEN_CB_UDATA: - plugin_gen_insn_udata(ptb, begin_op, insn_idx); - return; - case PLUGIN_GEN_CB_INLINE: - plugin_gen_insn_inline(ptb, begin_op, insn_idx); - return; - case PLUGIN_GEN_ENABLE_MEM_HELPER: - plugin_gen_enable_mem_helper(ptb, begin_op, insn_idx); - return; - default: - g_assert_not_reached(); - } - case PLUGIN_GEN_FROM_MEM: - switch (type) { - case PLUGIN_GEN_CB_MEM: - plugin_gen_mem_regular(ptb, begin_op, insn_idx); - return; - case PLUGIN_GEN_CB_INLINE: - plugin_gen_mem_inline(ptb, begin_op, insn_idx); - return; - default: - g_assert_not_reached(); - } - case PLUGIN_GEN_AFTER_INSN: - switch (type) { - case PLUGIN_GEN_DISABLE_MEM_HELPER: - plugin_gen_disable_mem_helper(ptb, begin_op, insn_idx); - return; - default: - g_assert_not_reached(); - } - default: - g_assert_not_reached(); - } -} - /* #define DEBUG_PLUGIN_GEN_OPS */ static void 
pr_ops(void) { @@ -820,21 +759,95 @@ static void pr_ops(void) static void plugin_gen_inject(const struct qemu_plugin_tb *plugin_tb) { TCGOp *op; - int insn_idx; + int insn_idx = -1; pr_ops(); - insn_idx = -1; - QSIMPLEQ_FOREACH(op, &tcg_ctx->plugin_ops, plugin_link) { - enum plugin_gen_from from = op->args[0]; - enum plugin_gen_cb type = op->args[1]; - tcg_debug_assert(op->opc == INDEX_op_plugin_cb_start); - /* ENABLE_MEM_HELPER is the first callback of an instruction */ - if (from == PLUGIN_GEN_FROM_INSN && - type == PLUGIN_GEN_ENABLE_MEM_HELPER) { + QTAILQ_FOREACH(op, &tcg_ctx->ops, link) { + switch (op->opc) { + case INDEX_op_insn_start: insn_idx++; + break; + case INDEX_op_plugin_cb_start: + { + enum plugin_gen_from from = op->args[0]; + enum plugin_gen_cb type = op->args[1]; + + switch (from) { + case PLUGIN_GEN_FROM_TB: + { + g_assert(insn_idx == -1); + + switch (type) { + case PLUGIN_GEN_CB_UDATA: + plugin_gen_tb_udata(plugin_tb, op); + break; + case PLUGIN_GEN_CB_INLINE: + plugin_gen_tb_inline(plugin_tb, op); + break; + default: + g_assert_not_reached(); + } + break; + } + case PLUGIN_GEN_FROM_INSN: + { + g_assert(insn_idx >= 0); + + switch (type) { + case PLUGIN_GEN_CB_UDATA: + plugin_gen_insn_udata(plugin_tb, op, insn_idx); + break; + case PLUGIN_GEN_CB_INLINE: + plugin_gen_insn_inline(plugin_tb, op, insn_idx); + break; + case PLUGIN_GEN_ENABLE_MEM_HELPER: + plugin_gen_enable_mem_helper(plugin_tb, op, insn_idx); + break; + default: + g_assert_not_reached(); + } + break; + } + case PLUGIN_GEN_FROM_MEM: + { + g_assert(insn_idx >= 0); + + switch (type) { + case PLUGIN_GEN_CB_MEM: + plugin_gen_mem_regular(plugin_tb, op, insn_idx); + break; + case PLUGIN_GEN_CB_INLINE: + plugin_gen_mem_inline(plugin_tb, op, insn_idx); + break; + default: + g_assert_not_reached(); + } + + break; + } + case PLUGIN_GEN_AFTER_INSN: + { + g_assert(insn_idx >= 0); + + switch (type) { + case PLUGIN_GEN_DISABLE_MEM_HELPER: + plugin_gen_disable_mem_helper(plugin_tb, op, insn_idx); + break; + default: + g_assert_not_reached(); + } + break; + } + default: + g_assert_not_reached(); + } + break; + } + default: + /* plugins don't care about any other ops */ + break; } - plugin_inject_cb(plugin_tb, op, insn_idx); } pr_ops(); } @@ -847,7 +860,6 @@ bool plugin_gen_tb_start(CPUState *cpu, const TranslationBlock *tb, bool mem_onl if (test_bit(QEMU_PLUGIN_EV_VCPU_TB_TRANS, cpu->plugin_mask)) { ret = true; - QSIMPLEQ_INIT(&tcg_ctx->plugin_ops); ptb->vaddr = tb->pc; ptb->vaddr2 = -1; get_page_addr_code_hostp(cpu->env_ptr, tb->pc, &ptb->haddr1); @@ -864,9 +876,8 @@ void plugin_gen_insn_start(CPUState *cpu, const DisasContextBase *db) struct qemu_plugin_tb *ptb = tcg_ctx->plugin_tb; struct qemu_plugin_insn *pinsn; - pinsn = qemu_plugin_tb_insn_get(ptb); + pinsn = qemu_plugin_tb_insn_get(ptb, db->pc_next); tcg_ctx->plugin_insn = pinsn; - pinsn->vaddr = db->pc_next; plugin_gen_empty_callback(PLUGIN_GEN_FROM_INSN); /* diff --git a/accel/tcg/tcg-accel-ops-mttcg.c b/accel/tcg/tcg-accel-ops-mttcg.c index 847d2079d2..29632bd4c0 100644 --- a/accel/tcg/tcg-accel-ops-mttcg.c +++ b/accel/tcg/tcg-accel-ops-mttcg.c @@ -28,6 +28,7 @@ #include "sysemu/tcg.h" #include "sysemu/replay.h" #include "qemu/main-loop.h" +#include "qemu/notify.h" #include "qemu/guest-random.h" #include "exec/exec-all.h" #include "hw/boards.h" @@ -35,6 +36,26 @@ #include "tcg-accel-ops.h" #include "tcg-accel-ops-mttcg.h" +typedef struct MttcgForceRcuNotifier { + Notifier notifier; + CPUState *cpu; +} MttcgForceRcuNotifier; + +static void do_nothing(CPUState *cpu, 
run_on_cpu_data d) +{ +} + +static void mttcg_force_rcu(Notifier *notify, void *data) +{ + CPUState *cpu = container_of(notify, MttcgForceRcuNotifier, notifier)->cpu; + + /* + * Called with rcu_registry_lock held, using async_run_on_cpu() ensures + * that there are no deadlocks. + */ + async_run_on_cpu(cpu, do_nothing, RUN_ON_CPU_NULL); +} + /* * In the multi-threaded case each vCPU has its own thread. The TLS * variable current_cpu can be used deep in the code to find the @@ -43,12 +64,16 @@ static void *mttcg_cpu_thread_fn(void *arg) { + MttcgForceRcuNotifier force_rcu; CPUState *cpu = arg; assert(tcg_enabled()); g_assert(!icount_enabled()); rcu_register_thread(); + force_rcu.notifier.notify = mttcg_force_rcu; + force_rcu.cpu = cpu; + rcu_add_force_rcu_notifier(&force_rcu.notifier); tcg_register_thread(); qemu_mutex_lock_iothread(); @@ -100,6 +125,7 @@ static void *mttcg_cpu_thread_fn(void *arg) tcg_cpus_destroy(cpu); qemu_mutex_unlock_iothread(); + rcu_remove_force_rcu_notifier(&force_rcu.notifier); rcu_unregister_thread(); return NULL; } diff --git a/accel/tcg/tcg-accel-ops-rr.c b/accel/tcg/tcg-accel-ops-rr.c index a5fd26190e..bf59f53dbc 100644 --- a/accel/tcg/tcg-accel-ops-rr.c +++ b/accel/tcg/tcg-accel-ops-rr.c @@ -28,6 +28,7 @@ #include "sysemu/tcg.h" #include "sysemu/replay.h" #include "qemu/main-loop.h" +#include "qemu/notify.h" #include "qemu/guest-random.h" #include "exec/exec-all.h" @@ -133,6 +134,11 @@ static void rr_deal_with_unplugged_cpus(void) } } +static void rr_force_rcu(Notifier *notify, void *data) +{ + rr_kick_next_cpu(); +} + /* * In the single-threaded case each vCPU is simulated in turn. If * there is more than a single vCPU we create a simple timer to kick @@ -143,10 +149,13 @@ static void rr_deal_with_unplugged_cpus(void) static void *rr_cpu_thread_fn(void *arg) { + Notifier force_rcu; CPUState *cpu = arg; assert(tcg_enabled()); rcu_register_thread(); + force_rcu.notify = rr_force_rcu; + rcu_add_force_rcu_notifier(&force_rcu); tcg_register_thread(); qemu_mutex_lock_iothread(); @@ -255,6 +264,7 @@ static void *rr_cpu_thread_fn(void *arg) rr_deal_with_unplugged_cpus(); } + rcu_remove_force_rcu_notifier(&force_rcu); rcu_unregister_thread(); return NULL; } diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c index 12e21a19e1..6e400e3142 100644 --- a/accel/tcg/translate-all.c +++ b/accel/tcg/translate-all.c @@ -2424,7 +2424,7 @@ void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr) cpu_loop_exit_noexc(cpu); } -static void print_qht_statistics(struct qht_stats hst) +static void print_qht_statistics(struct qht_stats hst, GString *buf) { uint32_t hgram_opts; size_t hgram_bins; @@ -2433,9 +2433,11 @@ static void print_qht_statistics(struct qht_stats hst) if (!hst.head_buckets) { return; } - qemu_printf("TB hash buckets %zu/%zu (%0.2f%% head buckets used)\n", - hst.used_head_buckets, hst.head_buckets, - (double)hst.used_head_buckets / hst.head_buckets * 100); + g_string_append_printf(buf, "TB hash buckets %zu/%zu " + "(%0.2f%% head buckets used)\n", + hst.used_head_buckets, hst.head_buckets, + (double)hst.used_head_buckets / + hst.head_buckets * 100); hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS; hgram_opts |= QDIST_PR_100X | QDIST_PR_PERCENT; @@ -2443,8 +2445,9 @@ static void print_qht_statistics(struct qht_stats hst) hgram_opts |= QDIST_PR_NODECIMAL; } hgram = qdist_pr(&hst.occupancy, 10, hgram_opts); - qemu_printf("TB hash occupancy %0.2f%% avg chain occ. 
Histogram: %s\n", - qdist_avg(&hst.occupancy) * 100, hgram); + g_string_append_printf(buf, "TB hash occupancy %0.2f%% avg chain occ. " + "Histogram: %s\n", + qdist_avg(&hst.occupancy) * 100, hgram); g_free(hgram); hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS; @@ -2456,8 +2459,9 @@ static void print_qht_statistics(struct qht_stats hst) hgram_opts |= QDIST_PR_NODECIMAL | QDIST_PR_NOBINRANGE; } hgram = qdist_pr(&hst.chain, hgram_bins, hgram_opts); - qemu_printf("TB hash avg chain %0.3f buckets. Histogram: %s\n", - qdist_avg(&hst.chain), hgram); + g_string_append_printf(buf, "TB hash avg chain %0.3f buckets. " + "Histogram: %s\n", + qdist_avg(&hst.chain), hgram); g_free(hgram); } @@ -2494,7 +2498,7 @@ static gboolean tb_tree_stats_iter(gpointer key, gpointer value, gpointer data) return false; } -void dump_exec_info(void) +void dump_exec_info(GString *buf) { struct tb_tree_stats tst = {}; struct qht_stats hst; @@ -2503,49 +2507,53 @@ void dump_exec_info(void) tcg_tb_foreach(tb_tree_stats_iter, &tst); nb_tbs = tst.nb_tbs; /* XXX: avoid using doubles ? */ - qemu_printf("Translation buffer state:\n"); + g_string_append_printf(buf, "Translation buffer state:\n"); /* * Report total code size including the padding and TB structs; * otherwise users might think "-accel tcg,tb-size" is not honoured. * For avg host size we use the precise numbers from tb_tree_stats though. */ - qemu_printf("gen code size %zu/%zu\n", - tcg_code_size(), tcg_code_capacity()); - qemu_printf("TB count %zu\n", nb_tbs); - qemu_printf("TB avg target size %zu max=%zu bytes\n", - nb_tbs ? tst.target_size / nb_tbs : 0, - tst.max_target_size); - qemu_printf("TB avg host size %zu bytes (expansion ratio: %0.1f)\n", - nb_tbs ? tst.host_size / nb_tbs : 0, - tst.target_size ? (double)tst.host_size / tst.target_size : 0); - qemu_printf("cross page TB count %zu (%zu%%)\n", tst.cross_page, - nb_tbs ? (tst.cross_page * 100) / nb_tbs : 0); - qemu_printf("direct jump count %zu (%zu%%) (2 jumps=%zu %zu%%)\n", - tst.direct_jmp_count, - nb_tbs ? (tst.direct_jmp_count * 100) / nb_tbs : 0, - tst.direct_jmp2_count, - nb_tbs ? (tst.direct_jmp2_count * 100) / nb_tbs : 0); + g_string_append_printf(buf, "gen code size %zu/%zu\n", + tcg_code_size(), tcg_code_capacity()); + g_string_append_printf(buf, "TB count %zu\n", nb_tbs); + g_string_append_printf(buf, "TB avg target size %zu max=%zu bytes\n", + nb_tbs ? tst.target_size / nb_tbs : 0, + tst.max_target_size); + g_string_append_printf(buf, "TB avg host size %zu bytes " + "(expansion ratio: %0.1f)\n", + nb_tbs ? tst.host_size / nb_tbs : 0, + tst.target_size ? + (double)tst.host_size / tst.target_size : 0); + g_string_append_printf(buf, "cross page TB count %zu (%zu%%)\n", + tst.cross_page, + nb_tbs ? (tst.cross_page * 100) / nb_tbs : 0); + g_string_append_printf(buf, "direct jump count %zu (%zu%%) " + "(2 jumps=%zu %zu%%)\n", + tst.direct_jmp_count, + nb_tbs ? (tst.direct_jmp_count * 100) / nb_tbs : 0, + tst.direct_jmp2_count, + nb_tbs ? 
(tst.direct_jmp2_count * 100) / nb_tbs : 0); qht_statistics_init(&tb_ctx.htable, &hst); - print_qht_statistics(hst); + print_qht_statistics(hst, buf); qht_statistics_destroy(&hst); - qemu_printf("\nStatistics:\n"); - qemu_printf("TB flush count %u\n", - qatomic_read(&tb_ctx.tb_flush_count)); - qemu_printf("TB invalidate count %u\n", - qatomic_read(&tb_ctx.tb_phys_invalidate_count)); + g_string_append_printf(buf, "\nStatistics:\n"); + g_string_append_printf(buf, "TB flush count %u\n", + qatomic_read(&tb_ctx.tb_flush_count)); + g_string_append_printf(buf, "TB invalidate count %u\n", + qatomic_read(&tb_ctx.tb_phys_invalidate_count)); tlb_flush_counts(&flush_full, &flush_part, &flush_elide); - qemu_printf("TLB full flushes %zu\n", flush_full); - qemu_printf("TLB partial flushes %zu\n", flush_part); - qemu_printf("TLB elided flushes %zu\n", flush_elide); - tcg_dump_info(); + g_string_append_printf(buf, "TLB full flushes %zu\n", flush_full); + g_string_append_printf(buf, "TLB partial flushes %zu\n", flush_part); + g_string_append_printf(buf, "TLB elided flushes %zu\n", flush_elide); + tcg_dump_info(buf); } -void dump_opcount_info(void) +void dump_opcount_info(GString *buf) { - tcg_dump_op_count(); + tcg_dump_op_count(buf); } #else /* CONFIG_USER_ONLY */ diff --git a/accel/tcg/translator.c b/accel/tcg/translator.c index 4c0133d0ab..ccaf171737 100644 --- a/accel/tcg/translator.c +++ b/accel/tcg/translator.c @@ -215,7 +215,7 @@ static inline void translator_maybe_page_protect(DisasContextBase *dcbase, if (do_swap) { \ ret = swap_fn(ret); \ } \ - plugin_insn_append(&ret, sizeof(ret)); \ + plugin_insn_append(pc, &ret, sizeof(ret)); \ return ret; \ } diff --git a/accel/tcg/user-exec.c b/accel/tcg/user-exec.c index 8fed542622..1528a21fad 100644 --- a/accel/tcg/user-exec.c +++ b/accel/tcg/user-exec.c @@ -27,48 +27,18 @@ #include "exec/helper-proto.h" #include "qemu/atomic128.h" #include "trace/trace-root.h" -#include "trace/mem.h" - -#undef EAX -#undef ECX -#undef EDX -#undef EBX -#undef ESP -#undef EBP -#undef ESI -#undef EDI -#undef EIP -#ifdef __linux__ -#include -#endif +#include "tcg/tcg-ldst.h" +#include "internal.h" __thread uintptr_t helper_retaddr; //#define DEBUG_SIGNAL -/* exit the current TB from a signal handler. The host registers are - restored in a state compatible with the CPU emulator +/* + * Adjust the pc to pass to cpu_restore_state; return the memop type. */ -static void QEMU_NORETURN cpu_exit_tb_from_sighandler(CPUState *cpu, - sigset_t *old_set) +MMUAccessType adjust_signal_pc(uintptr_t *pc, bool is_write) { - /* XXX: use siglongjmp ? */ - sigprocmask(SIG_SETMASK, old_set, NULL); - cpu_loop_exit_noexc(cpu); -} - -/* 'pc' is the host PC at which the exception was raised. 'address' is - the effective address of the memory exception. 'is_write' is 1 if a - write caused the exception and otherwise 0'. 'old_set' is the - signal set which should be restored */ -static inline int handle_cpu_signal(uintptr_t pc, siginfo_t *info, - int is_write, sigset_t *old_set) -{ - CPUState *cpu = current_cpu; - CPUClass *cc; - unsigned long address = (unsigned long)info->si_addr; - MMUAccessType access_type = is_write ? MMU_DATA_STORE : MMU_DATA_LOAD; - switch (helper_retaddr) { default: /* @@ -77,7 +47,7 @@ static inline int handle_cpu_signal(uintptr_t pc, siginfo_t *info, * pointer into the generated code that will unwind to the * correct guest pc. 
*/ - pc = helper_retaddr; + *pc = helper_retaddr; break; case 0: @@ -97,7 +67,7 @@ static inline int handle_cpu_signal(uintptr_t pc, siginfo_t *info, * Therefore, adjust to compensate for what will be done later * by cpu_restore_state_from_tb. */ - pc += GETPC_ADJ; + *pc += GETPC_ADJ; break; case 1: @@ -113,118 +83,97 @@ static inline int handle_cpu_signal(uintptr_t pc, siginfo_t *info, * * Like tb_gen_code, release the memory lock before cpu_loop_exit. */ - pc = 0; - access_type = MMU_INST_FETCH; mmap_unlock(); - break; + *pc = 0; + return MMU_INST_FETCH; } - /* For synchronous signals we expect to be coming from the vCPU - * thread (so current_cpu should be valid) and either from running - * code or during translation which can fault as we cross pages. - * - * If neither is true then something has gone wrong and we should - * abort rather than try and restart the vCPU execution. - */ - if (!cpu || !cpu->running) { - printf("qemu:%s received signal outside vCPU context @ pc=0x%" - PRIxPTR "\n", __func__, pc); - abort(); + return is_write ? MMU_DATA_STORE : MMU_DATA_LOAD; +} + +/** + * handle_sigsegv_accerr_write: + * @cpu: the cpu context + * @old_set: the sigset_t from the signal ucontext_t + * @host_pc: the host pc, adjusted for the signal + * @guest_addr: the guest address of the fault + * + * Return true if the write fault has been handled, and should be re-tried. + * + * Note that it is important that we don't call page_unprotect() unless + * this is really a "write to nonwriteable page" fault, because + * page_unprotect() assumes that if it is called for an access to + * a page that's writeable this means we had two threads racing and + * another thread got there first and already made the page writeable; + * so we will retry the access. If we were to call page_unprotect() + * for some other kind of fault that should really be passed to the + * guest, we'd end up in an infinite loop of retrying the faulting access. + */ +bool handle_sigsegv_accerr_write(CPUState *cpu, sigset_t *old_set, + uintptr_t host_pc, abi_ptr guest_addr) +{ + switch (page_unprotect(guest_addr, host_pc)) { + case 0: + /* + * Fault not caused by a page marked unwritable to protect + * cached translations, must be the guest binary's problem. + */ + return false; + case 1: + /* + * Fault caused by protection of cached translation; TBs + * invalidated, so resume execution. + */ + return true; + case 2: + /* + * Fault caused by protection of cached translation, and the + * currently executing TB was modified and must be exited immediately. + */ + sigprocmask(SIG_SETMASK, old_set, NULL); + cpu_loop_exit_noexc(cpu); + /* NORETURN */ + default: + g_assert_not_reached(); } - -#if defined(DEBUG_SIGNAL) - printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n", - pc, address, is_write, *(unsigned long *)old_set); -#endif - /* XXX: locking issue */ - /* Note that it is important that we don't call page_unprotect() unless - * this is really a "write to nonwriteable page" fault, because - * page_unprotect() assumes that if it is called for an access to - * a page that's writeable this means we had two threads racing and - * another thread got there first and already made the page writeable; - * so we will retry the access. If we were to call page_unprotect() - * for some other kind of fault that should really be passed to the - * guest, we'd end up in an infinite loop of retrying the faulting - * access. 
- */ - if (is_write && info->si_signo == SIGSEGV && info->si_code == SEGV_ACCERR && - h2g_valid(address)) { - switch (page_unprotect(h2g(address), pc)) { - case 0: - /* Fault not caused by a page marked unwritable to protect - * cached translations, must be the guest binary's problem. - */ - break; - case 1: - /* Fault caused by protection of cached translation; TBs - * invalidated, so resume execution. Retain helper_retaddr - * for a possible second fault. - */ - return 1; - case 2: - /* Fault caused by protection of cached translation, and the - * currently executing TB was modified and must be exited - * immediately. Clear helper_retaddr for next execution. - */ - clear_helper_retaddr(); - cpu_exit_tb_from_sighandler(cpu, old_set); - /* NORETURN */ - - default: - g_assert_not_reached(); - } - } - - /* Convert forcefully to guest address space, invalid addresses - are still valid segv ones */ - address = h2g_nocheck(address); - - /* - * There is no way the target can handle this other than raising - * an exception. Undo signal and retaddr state prior to longjmp. - */ - sigprocmask(SIG_SETMASK, old_set, NULL); - clear_helper_retaddr(); - - cc = CPU_GET_CLASS(cpu); - cc->tcg_ops->tlb_fill(cpu, address, 0, access_type, - MMU_USER_IDX, false, pc); - g_assert_not_reached(); } static int probe_access_internal(CPUArchState *env, target_ulong addr, int fault_size, MMUAccessType access_type, bool nonfault, uintptr_t ra) { - int flags; + int acc_flag; + bool maperr; switch (access_type) { case MMU_DATA_STORE: - flags = PAGE_WRITE; + acc_flag = PAGE_WRITE_ORG; break; case MMU_DATA_LOAD: - flags = PAGE_READ; + acc_flag = PAGE_READ; break; case MMU_INST_FETCH: - flags = PAGE_EXEC; + acc_flag = PAGE_EXEC; break; default: g_assert_not_reached(); } - if (!guest_addr_valid_untagged(addr) || - page_check_range(addr, 1, flags) < 0) { - if (nonfault) { - return TLB_INVALID_MASK; - } else { - CPUState *cpu = env_cpu(env); - CPUClass *cc = CPU_GET_CLASS(cpu); - cc->tcg_ops->tlb_fill(cpu, addr, fault_size, access_type, - MMU_USER_IDX, false, ra); - g_assert_not_reached(); + if (guest_addr_valid_untagged(addr)) { + int page_flags = page_get_flags(addr); + if (page_flags & acc_flag) { + return 0; /* success */ } + maperr = !(page_flags & PAGE_VALID); + } else { + maperr = true; } - return 0; + + if (nonfault) { + return TLB_INVALID_MASK; + } + + cpu_loop_exit_sigsegv(env_cpu(env), addr, access_type, maperr, ra); } int probe_access_flags(CPUArchState *env, target_ulong addr, @@ -250,969 +199,244 @@ void *probe_access(CPUArchState *env, target_ulong addr, int size, return size ? 
g2h(env_cpu(env), addr) : NULL; } -#if defined(__i386__) - -#if defined(__NetBSD__) -#include -#include - -#define EIP_sig(context) ((context)->uc_mcontext.__gregs[_REG_EIP]) -#define TRAP_sig(context) ((context)->uc_mcontext.__gregs[_REG_TRAPNO]) -#define ERROR_sig(context) ((context)->uc_mcontext.__gregs[_REG_ERR]) -#define MASK_sig(context) ((context)->uc_sigmask) -#define PAGE_FAULT_TRAP T_PAGEFLT -#elif defined(__FreeBSD__) || defined(__DragonFly__) -#include -#include - -#define EIP_sig(context) (*((unsigned long *)&(context)->uc_mcontext.mc_eip)) -#define TRAP_sig(context) ((context)->uc_mcontext.mc_trapno) -#define ERROR_sig(context) ((context)->uc_mcontext.mc_err) -#define MASK_sig(context) ((context)->uc_sigmask) -#define PAGE_FAULT_TRAP T_PAGEFLT -#elif defined(__OpenBSD__) -#include -#define EIP_sig(context) ((context)->sc_eip) -#define TRAP_sig(context) ((context)->sc_trapno) -#define ERROR_sig(context) ((context)->sc_err) -#define MASK_sig(context) ((context)->sc_mask) -#define PAGE_FAULT_TRAP T_PAGEFLT -#else -#define EIP_sig(context) ((context)->uc_mcontext.gregs[REG_EIP]) -#define TRAP_sig(context) ((context)->uc_mcontext.gregs[REG_TRAPNO]) -#define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR]) -#define MASK_sig(context) ((context)->uc_sigmask) -#define PAGE_FAULT_TRAP 0xe -#endif - -int cpu_signal_handler(int host_signum, void *pinfo, - void *puc) -{ - siginfo_t *info = pinfo; -#if defined(__NetBSD__) || defined(__FreeBSD__) || defined(__DragonFly__) - ucontext_t *uc = puc; -#elif defined(__OpenBSD__) - struct sigcontext *uc = puc; -#else - ucontext_t *uc = puc; -#endif - unsigned long pc; - int trapno; - -#ifndef REG_EIP -/* for glibc 2.1 */ -#define REG_EIP EIP -#define REG_ERR ERR -#define REG_TRAPNO TRAPNO -#endif - pc = EIP_sig(uc); - trapno = TRAP_sig(uc); - return handle_cpu_signal(pc, info, - trapno == PAGE_FAULT_TRAP ? 
- (ERROR_sig(uc) >> 1) & 1 : 0, - &MASK_sig(uc)); -} - -#elif defined(__x86_64__) - -#ifdef __NetBSD__ -#include -#define PC_sig(context) _UC_MACHINE_PC(context) -#define TRAP_sig(context) ((context)->uc_mcontext.__gregs[_REG_TRAPNO]) -#define ERROR_sig(context) ((context)->uc_mcontext.__gregs[_REG_ERR]) -#define MASK_sig(context) ((context)->uc_sigmask) -#define PAGE_FAULT_TRAP T_PAGEFLT -#elif defined(__OpenBSD__) -#include -#define PC_sig(context) ((context)->sc_rip) -#define TRAP_sig(context) ((context)->sc_trapno) -#define ERROR_sig(context) ((context)->sc_err) -#define MASK_sig(context) ((context)->sc_mask) -#define PAGE_FAULT_TRAP T_PAGEFLT -#elif defined(__FreeBSD__) || defined(__DragonFly__) -#include -#include - -#define PC_sig(context) (*((unsigned long *)&(context)->uc_mcontext.mc_rip)) -#define TRAP_sig(context) ((context)->uc_mcontext.mc_trapno) -#define ERROR_sig(context) ((context)->uc_mcontext.mc_err) -#define MASK_sig(context) ((context)->uc_sigmask) -#define PAGE_FAULT_TRAP T_PAGEFLT -#else -#define PC_sig(context) ((context)->uc_mcontext.gregs[REG_RIP]) -#define TRAP_sig(context) ((context)->uc_mcontext.gregs[REG_TRAPNO]) -#define ERROR_sig(context) ((context)->uc_mcontext.gregs[REG_ERR]) -#define MASK_sig(context) ((context)->uc_sigmask) -#define PAGE_FAULT_TRAP 0xe -#endif - -int cpu_signal_handler(int host_signum, void *pinfo, - void *puc) -{ - siginfo_t *info = pinfo; - unsigned long pc; -#if defined(__NetBSD__) || defined(__FreeBSD__) || defined(__DragonFly__) - ucontext_t *uc = puc; -#elif defined(__OpenBSD__) - struct sigcontext *uc = puc; -#else - ucontext_t *uc = puc; -#endif - - pc = PC_sig(uc); - return handle_cpu_signal(pc, info, - TRAP_sig(uc) == PAGE_FAULT_TRAP ? - (ERROR_sig(uc) >> 1) & 1 : 0, - &MASK_sig(uc)); -} - -#elif defined(_ARCH_PPC) - -/*********************************************************************** - * signal context platform-specific definitions - * From Wine - */ -#ifdef linux -/* All Registers access - only for local access */ -#define REG_sig(reg_name, context) \ - ((context)->uc_mcontext.regs->reg_name) -/* Gpr Registers access */ -#define GPR_sig(reg_num, context) REG_sig(gpr[reg_num], context) -/* Program counter */ -#define IAR_sig(context) REG_sig(nip, context) -/* Machine State Register (Supervisor) */ -#define MSR_sig(context) REG_sig(msr, context) -/* Count register */ -#define CTR_sig(context) REG_sig(ctr, context) -/* User's integer exception register */ -#define XER_sig(context) REG_sig(xer, context) -/* Link register */ -#define LR_sig(context) REG_sig(link, context) -/* Condition register */ -#define CR_sig(context) REG_sig(ccr, context) - -/* Float Registers access */ -#define FLOAT_sig(reg_num, context) \ - (((double *)((char *)((context)->uc_mcontext.regs + 48 * 4)))[reg_num]) -#define FPSCR_sig(context) \ - (*(int *)((char *)((context)->uc_mcontext.regs + (48 + 32 * 2) * 4))) -/* Exception Registers access */ -#define DAR_sig(context) REG_sig(dar, context) -#define DSISR_sig(context) REG_sig(dsisr, context) -#define TRAP_sig(context) REG_sig(trap, context) -#endif /* linux */ - -#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__) -#include -#define IAR_sig(context) ((context)->uc_mcontext.mc_srr0) -#define MSR_sig(context) ((context)->uc_mcontext.mc_srr1) -#define CTR_sig(context) ((context)->uc_mcontext.mc_ctr) -#define XER_sig(context) ((context)->uc_mcontext.mc_xer) -#define LR_sig(context) ((context)->uc_mcontext.mc_lr) -#define CR_sig(context) ((context)->uc_mcontext.mc_cr) -/* Exception Registers access */ 
-#define DAR_sig(context) ((context)->uc_mcontext.mc_dar) -#define DSISR_sig(context) ((context)->uc_mcontext.mc_dsisr) -#define TRAP_sig(context) ((context)->uc_mcontext.mc_exc) -#endif /* __FreeBSD__|| __FreeBSD_kernel__ */ - -int cpu_signal_handler(int host_signum, void *pinfo, - void *puc) -{ - siginfo_t *info = pinfo; -#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__) - ucontext_t *uc = puc; -#else - ucontext_t *uc = puc; -#endif - unsigned long pc; - int is_write; - - pc = IAR_sig(uc); - is_write = 0; -#if 0 - /* ppc 4xx case */ - if (DSISR_sig(uc) & 0x00800000) { - is_write = 1; - } -#else - if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000)) { - is_write = 1; - } -#endif - return handle_cpu_signal(pc, info, is_write, &uc->uc_sigmask); -} - -#elif defined(__alpha__) - -int cpu_signal_handler(int host_signum, void *pinfo, - void *puc) -{ - siginfo_t *info = pinfo; - ucontext_t *uc = puc; - uint32_t *pc = uc->uc_mcontext.sc_pc; - uint32_t insn = *pc; - int is_write = 0; - - /* XXX: need kernel patch to get write flag faster */ - switch (insn >> 26) { - case 0x0d: /* stw */ - case 0x0e: /* stb */ - case 0x0f: /* stq_u */ - case 0x24: /* stf */ - case 0x25: /* stg */ - case 0x26: /* sts */ - case 0x27: /* stt */ - case 0x2c: /* stl */ - case 0x2d: /* stq */ - case 0x2e: /* stl_c */ - case 0x2f: /* stq_c */ - is_write = 1; - } - - return handle_cpu_signal(pc, info, is_write, &uc->uc_sigmask); -} -#elif defined(__sparc__) - -int cpu_signal_handler(int host_signum, void *pinfo, - void *puc) -{ - siginfo_t *info = pinfo; - int is_write; - uint32_t insn; -#if !defined(__arch64__) || defined(CONFIG_SOLARIS) - uint32_t *regs = (uint32_t *)(info + 1); - void *sigmask = (regs + 20); - /* XXX: is there a standard glibc define ? */ - unsigned long pc = regs[1]; -#else -#ifdef __linux__ - struct sigcontext *sc = puc; - unsigned long pc = sc->sigc_regs.tpc; - void *sigmask = (void *)sc->sigc_mask; -#elif defined(__OpenBSD__) - struct sigcontext *uc = puc; - unsigned long pc = uc->sc_pc; - void *sigmask = (void *)(long)uc->sc_mask; -#elif defined(__NetBSD__) - ucontext_t *uc = puc; - unsigned long pc = _UC_MACHINE_PC(uc); - void *sigmask = (void *)&uc->uc_sigmask; -#endif -#endif - - /* XXX: need kernel patch to get write flag faster */ - is_write = 0; - insn = *(uint32_t *)pc; - if ((insn >> 30) == 3) { - switch ((insn >> 19) & 0x3f) { - case 0x05: /* stb */ - case 0x15: /* stba */ - case 0x06: /* sth */ - case 0x16: /* stha */ - case 0x04: /* st */ - case 0x14: /* sta */ - case 0x07: /* std */ - case 0x17: /* stda */ - case 0x0e: /* stx */ - case 0x1e: /* stxa */ - case 0x24: /* stf */ - case 0x34: /* stfa */ - case 0x27: /* stdf */ - case 0x37: /* stdfa */ - case 0x26: /* stqf */ - case 0x36: /* stqfa */ - case 0x25: /* stfsr */ - case 0x3c: /* casa */ - case 0x3e: /* casxa */ - is_write = 1; - break; - } - } - return handle_cpu_signal(pc, info, is_write, sigmask); -} - -#elif defined(__arm__) - -#if defined(__NetBSD__) -#include -#include -#endif - -int cpu_signal_handler(int host_signum, void *pinfo, - void *puc) -{ - siginfo_t *info = pinfo; -#if defined(__NetBSD__) - ucontext_t *uc = puc; - siginfo_t *si = pinfo; -#else - ucontext_t *uc = puc; -#endif - unsigned long pc; - uint32_t fsr; - int is_write; - -#if defined(__NetBSD__) - pc = uc->uc_mcontext.__gregs[_REG_R15]; -#elif defined(__GLIBC__) && (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3)) - pc = uc->uc_mcontext.gregs[R15]; -#else - pc = uc->uc_mcontext.arm_pc; -#endif - -#ifdef __NetBSD__ - fsr = si->si_trap; 
-#else - fsr = uc->uc_mcontext.error_code; -#endif - /* - * In the FSR, bit 11 is WnR, assuming a v6 or - * later processor. On v5 we will always report - * this as a read, which will fail later. - */ - is_write = extract32(fsr, 11, 1); - return handle_cpu_signal(pc, info, is_write, &uc->uc_sigmask); -} - -#elif defined(__aarch64__) - -#if defined(__NetBSD__) - -#include -#include - -int cpu_signal_handler(int host_signum, void *pinfo, void *puc) -{ - ucontext_t *uc = puc; - siginfo_t *si = pinfo; - unsigned long pc; - int is_write; - uint32_t esr; - - pc = uc->uc_mcontext.__gregs[_REG_PC]; - esr = si->si_trap; - - /* - * siginfo_t::si_trap is the ESR value, for data aborts ESR.EC - * is 0b10010x: then bit 6 is the WnR bit - */ - is_write = extract32(esr, 27, 5) == 0x12 && extract32(esr, 6, 1) == 1; - return handle_cpu_signal(pc, si, is_write, &uc->uc_sigmask); -} - -#else - -#ifndef ESR_MAGIC -/* Pre-3.16 kernel headers don't have these, so provide fallback definitions */ -#define ESR_MAGIC 0x45535201 -struct esr_context { - struct _aarch64_ctx head; - uint64_t esr; -}; -#endif - -static inline struct _aarch64_ctx *first_ctx(ucontext_t *uc) -{ - return (struct _aarch64_ctx *)&uc->uc_mcontext.__reserved; -} - -static inline struct _aarch64_ctx *next_ctx(struct _aarch64_ctx *hdr) -{ - return (struct _aarch64_ctx *)((char *)hdr + hdr->size); -} - -int cpu_signal_handler(int host_signum, void *pinfo, void *puc) -{ - siginfo_t *info = pinfo; - ucontext_t *uc = puc; - uintptr_t pc = uc->uc_mcontext.pc; - bool is_write; - struct _aarch64_ctx *hdr; - struct esr_context const *esrctx = NULL; - - /* Find the esr_context, which has the WnR bit in it */ - for (hdr = first_ctx(uc); hdr->magic; hdr = next_ctx(hdr)) { - if (hdr->magic == ESR_MAGIC) { - esrctx = (struct esr_context const *)hdr; - break; - } - } - - if (esrctx) { - /* For data aborts ESR.EC is 0b10010x: then bit 6 is the WnR bit */ - uint64_t esr = esrctx->esr; - is_write = extract32(esr, 27, 5) == 0x12 && extract32(esr, 6, 1) == 1; - } else { - /* - * Fall back to parsing instructions; will only be needed - * for really ancient (pre-3.16) kernels. - */ - uint32_t insn = *(uint32_t *)pc; - - is_write = ((insn & 0xbfff0000) == 0x0c000000 /* C3.3.1 */ - || (insn & 0xbfe00000) == 0x0c800000 /* C3.3.2 */ - || (insn & 0xbfdf0000) == 0x0d000000 /* C3.3.3 */ - || (insn & 0xbfc00000) == 0x0d800000 /* C3.3.4 */ - || (insn & 0x3f400000) == 0x08000000 /* C3.3.6 */ - || (insn & 0x3bc00000) == 0x39000000 /* C3.3.13 */ - || (insn & 0x3fc00000) == 0x3d800000 /* ... 128bit */ - /* Ignore bits 10, 11 & 21, controlling indexing. */ - || (insn & 0x3bc00000) == 0x38000000 /* C3.3.8-12 */ - || (insn & 0x3fe00000) == 0x3c800000 /* ... 128bit */ - /* Ignore bits 23 & 24, controlling indexing. */ - || (insn & 0x3a400000) == 0x28000000); /* C3.3.7,14-16 */ - } - return handle_cpu_signal(pc, info, is_write, &uc->uc_sigmask); -} -#endif - -#elif defined(__s390__) - -int cpu_signal_handler(int host_signum, void *pinfo, - void *puc) -{ - siginfo_t *info = pinfo; - ucontext_t *uc = puc; - unsigned long pc; - uint16_t *pinsn; - int is_write = 0; - - pc = uc->uc_mcontext.psw.addr; - - /* - * ??? On linux, the non-rt signal handler has 4 (!) arguments instead - * of the normal 2 arguments. The 4th argument contains the "Translation- - * Exception Identification for DAT Exceptions" from the hardware (aka - * "int_parm_long"), which does in fact contain the is_write value. - * The rt signal handler, as far as I can tell, does not give this value - * at all. 
Not that we could get to it from here even if it were. - * So fall back to parsing instructions. Treat read-modify-write ones as - * writes, which is not fully correct, but for tracking self-modifying code - * this is better than treating them as reads. Checking si_addr page flags - * might be a viable improvement, albeit a racy one. - */ - /* ??? This is not even close to complete. */ - pinsn = (uint16_t *)pc; - switch (pinsn[0] >> 8) { - case 0x50: /* ST */ - case 0x42: /* STC */ - case 0x40: /* STH */ - case 0xba: /* CS */ - case 0xbb: /* CDS */ - is_write = 1; - break; - case 0xc4: /* RIL format insns */ - switch (pinsn[0] & 0xf) { - case 0xf: /* STRL */ - case 0xb: /* STGRL */ - case 0x7: /* STHRL */ - is_write = 1; - } - break; - case 0xc8: /* SSF format insns */ - switch (pinsn[0] & 0xf) { - case 0x2: /* CSST */ - is_write = 1; - } - break; - case 0xe3: /* RXY format insns */ - switch (pinsn[2] & 0xff) { - case 0x50: /* STY */ - case 0x24: /* STG */ - case 0x72: /* STCY */ - case 0x70: /* STHY */ - case 0x8e: /* STPQ */ - case 0x3f: /* STRVH */ - case 0x3e: /* STRV */ - case 0x2f: /* STRVG */ - is_write = 1; - } - break; - case 0xeb: /* RSY format insns */ - switch (pinsn[2] & 0xff) { - case 0x14: /* CSY */ - case 0x30: /* CSG */ - case 0x31: /* CDSY */ - case 0x3e: /* CDSG */ - case 0xe4: /* LANG */ - case 0xe6: /* LAOG */ - case 0xe7: /* LAXG */ - case 0xe8: /* LAAG */ - case 0xea: /* LAALG */ - case 0xf4: /* LAN */ - case 0xf6: /* LAO */ - case 0xf7: /* LAX */ - case 0xfa: /* LAAL */ - case 0xf8: /* LAA */ - is_write = 1; - } - break; - } - - return handle_cpu_signal(pc, info, is_write, &uc->uc_sigmask); -} - -#elif defined(__mips__) - -#if defined(__misp16) || defined(__mips_micromips) -#error "Unsupported encoding" -#endif - -int cpu_signal_handler(int host_signum, void *pinfo, - void *puc) -{ - siginfo_t *info = pinfo; - ucontext_t *uc = puc; - uintptr_t pc = uc->uc_mcontext.pc; - uint32_t insn = *(uint32_t *)pc; - int is_write = 0; - - /* Detect all store instructions at program counter. */ - switch((insn >> 26) & 077) { - case 050: /* SB */ - case 051: /* SH */ - case 052: /* SWL */ - case 053: /* SW */ - case 054: /* SDL */ - case 055: /* SDR */ - case 056: /* SWR */ - case 070: /* SC */ - case 071: /* SWC1 */ - case 074: /* SCD */ - case 075: /* SDC1 */ - case 077: /* SD */ -#if !defined(__mips_isa_rev) || __mips_isa_rev < 6 - case 072: /* SWC2 */ - case 076: /* SDC2 */ -#endif - is_write = 1; - break; - case 023: /* COP1X */ - /* Required in all versions of MIPS64 since - MIPS64r1 and subsequent versions of MIPS32r2. */ - switch (insn & 077) { - case 010: /* SWXC1 */ - case 011: /* SDXC1 */ - case 015: /* SUXC1 */ - is_write = 1; - } - break; - } - - return handle_cpu_signal(pc, info, is_write, &uc->uc_sigmask); -} - -#elif defined(__riscv) - -int cpu_signal_handler(int host_signum, void *pinfo, - void *puc) -{ - siginfo_t *info = pinfo; - ucontext_t *uc = puc; - greg_t pc = uc->uc_mcontext.__gregs[REG_PC]; - uint32_t insn = *(uint32_t *)pc; - int is_write = 0; - - /* Detect store by reading the instruction at the program - counter. 
Note: we currently only generate 32-bit - instructions so we thus only detect 32-bit stores */ - switch (((insn >> 0) & 0b11)) { - case 3: - switch (((insn >> 2) & 0b11111)) { - case 8: - switch (((insn >> 12) & 0b111)) { - case 0: /* sb */ - case 1: /* sh */ - case 2: /* sw */ - case 3: /* sd */ - case 4: /* sq */ - is_write = 1; - break; - default: - break; - } - break; - case 9: - switch (((insn >> 12) & 0b111)) { - case 2: /* fsw */ - case 3: /* fsd */ - case 4: /* fsq */ - is_write = 1; - break; - default: - break; - } - break; - default: - break; - } - } - - /* Check for compressed instructions */ - switch (((insn >> 13) & 0b111)) { - case 7: - switch (insn & 0b11) { - case 0: /*c.sd */ - case 2: /* c.sdsp */ - is_write = 1; - break; - default: - break; - } - break; - case 6: - switch (insn & 0b11) { - case 0: /* c.sw */ - case 3: /* c.swsp */ - is_write = 1; - break; - default: - break; - } - break; - default: - break; - } - - return handle_cpu_signal(pc, info, is_write, &uc->uc_sigmask); -} - -#else - -#error host CPU specific signal handler needed - -#endif - /* The softmmu versions of these helpers are in cputlb.c. */ -uint32_t cpu_ldub_data(CPUArchState *env, abi_ptr ptr) +/* + * Verify that we have passed the correct MemOp to the correct function. + * + * We could present one function to target code, and dispatch based on + * the MemOp, but so far we have worked hard to avoid an indirect function + * call along the memory path. + */ +static void validate_memop(MemOpIdx oi, MemOp expected) { - uint32_t ret; - uint16_t meminfo = trace_mem_get_info(MO_UB, MMU_USER_IDX, false); +#ifdef CONFIG_DEBUG_TCG + MemOp have = get_memop(oi) & (MO_SIZE | MO_BSWAP); + assert(have == expected); +#endif +} - trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo); - ret = ldub_p(g2h(env_cpu(env), ptr)); - qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo); +void helper_unaligned_ld(CPUArchState *env, target_ulong addr) +{ + cpu_loop_exit_sigbus(env_cpu(env), addr, MMU_DATA_LOAD, GETPC()); +} + +void helper_unaligned_st(CPUArchState *env, target_ulong addr) +{ + cpu_loop_exit_sigbus(env_cpu(env), addr, MMU_DATA_STORE, GETPC()); +} + +static void *cpu_mmu_lookup(CPUArchState *env, target_ulong addr, + MemOpIdx oi, uintptr_t ra, MMUAccessType type) +{ + MemOp mop = get_memop(oi); + int a_bits = get_alignment_bits(mop); + void *ret; + + /* Enforce guest required alignment. 
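A request whose address is not aligned to the MemOp's declared alignment never reaches host memory: cpu_loop_exit_sigbus() delivers SIGBUS to the guest with the faulting address and access type.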
*/ + if (unlikely(addr & ((1 << a_bits) - 1))) { + cpu_loop_exit_sigbus(env_cpu(env), addr, type, ra); + } + + ret = g2h(env_cpu(env), addr); + set_helper_retaddr(ra); return ret; } -int cpu_ldsb_data(CPUArchState *env, abi_ptr ptr) +uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr addr, + MemOpIdx oi, uintptr_t ra) { - int ret; - uint16_t meminfo = trace_mem_get_info(MO_SB, MMU_USER_IDX, false); + void *haddr; + uint8_t ret; - trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo); - ret = ldsb_p(g2h(env_cpu(env), ptr)); - qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo); - return ret; -} - -uint32_t cpu_lduw_be_data(CPUArchState *env, abi_ptr ptr) -{ - uint32_t ret; - uint16_t meminfo = trace_mem_get_info(MO_BEUW, MMU_USER_IDX, false); - - trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo); - ret = lduw_be_p(g2h(env_cpu(env), ptr)); - qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo); - return ret; -} - -int cpu_ldsw_be_data(CPUArchState *env, abi_ptr ptr) -{ - int ret; - uint16_t meminfo = trace_mem_get_info(MO_BESW, MMU_USER_IDX, false); - - trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo); - ret = ldsw_be_p(g2h(env_cpu(env), ptr)); - qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo); - return ret; -} - -uint32_t cpu_ldl_be_data(CPUArchState *env, abi_ptr ptr) -{ - uint32_t ret; - uint16_t meminfo = trace_mem_get_info(MO_BEUL, MMU_USER_IDX, false); - - trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo); - ret = ldl_be_p(g2h(env_cpu(env), ptr)); - qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo); - return ret; -} - -uint64_t cpu_ldq_be_data(CPUArchState *env, abi_ptr ptr) -{ - uint64_t ret; - uint16_t meminfo = trace_mem_get_info(MO_BEQ, MMU_USER_IDX, false); - - trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo); - ret = ldq_be_p(g2h(env_cpu(env), ptr)); - qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo); - return ret; -} - -uint32_t cpu_lduw_le_data(CPUArchState *env, abi_ptr ptr) -{ - uint32_t ret; - uint16_t meminfo = trace_mem_get_info(MO_LEUW, MMU_USER_IDX, false); - - trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo); - ret = lduw_le_p(g2h(env_cpu(env), ptr)); - qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo); - return ret; -} - -int cpu_ldsw_le_data(CPUArchState *env, abi_ptr ptr) -{ - int ret; - uint16_t meminfo = trace_mem_get_info(MO_LESW, MMU_USER_IDX, false); - - trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo); - ret = ldsw_le_p(g2h(env_cpu(env), ptr)); - qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo); - return ret; -} - -uint32_t cpu_ldl_le_data(CPUArchState *env, abi_ptr ptr) -{ - uint32_t ret; - uint16_t meminfo = trace_mem_get_info(MO_LEUL, MMU_USER_IDX, false); - - trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo); - ret = ldl_le_p(g2h(env_cpu(env), ptr)); - qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo); - return ret; -} - -uint64_t cpu_ldq_le_data(CPUArchState *env, abi_ptr ptr) -{ - uint64_t ret; - uint16_t meminfo = trace_mem_get_info(MO_LEQ, MMU_USER_IDX, false); - - trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo); - ret = ldq_le_p(g2h(env_cpu(env), ptr)); - qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo); - return ret; -} - -uint32_t cpu_ldub_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr) -{ - uint32_t ret; - - set_helper_retaddr(retaddr); - ret = cpu_ldub_data(env, ptr); + validate_memop(oi, MO_UB); + trace_guest_ld_before_exec(env_cpu(env), addr, oi); + haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD); + ret = ldub_p(haddr); clear_helper_retaddr(); + 
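/*
 * Illustrative usage, not part of this patch (the helper shown is an
 * assumption): a target helper wanting an aligned big-endian 32-bit load
 * through the new user-mode API builds the MemOpIdx itself, e.g.
 *
 *     MemOpIdx oi = make_memop_idx(MO_BEUL | MO_ALIGN, MMU_USER_IDX);
 *     uint32_t val = cpu_ldl_be_mmu(env, addr, oi, GETPC());
 *
 * The oi built this way is what validate_memop() checks and what is
 * passed unchanged to the plugin callback, as below for the byte load.
 */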
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R); return ret; } -int cpu_ldsb_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr) +uint16_t cpu_ldw_be_mmu(CPUArchState *env, abi_ptr addr, + MemOpIdx oi, uintptr_t ra) { - int ret; + void *haddr; + uint16_t ret; - set_helper_retaddr(retaddr); - ret = cpu_ldsb_data(env, ptr); + validate_memop(oi, MO_BEUW); + trace_guest_ld_before_exec(env_cpu(env), addr, oi); + haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD); + ret = lduw_be_p(haddr); clear_helper_retaddr(); + qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R); return ret; } -uint32_t cpu_lduw_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr) +uint32_t cpu_ldl_be_mmu(CPUArchState *env, abi_ptr addr, + MemOpIdx oi, uintptr_t ra) { + void *haddr; uint32_t ret; - set_helper_retaddr(retaddr); - ret = cpu_lduw_be_data(env, ptr); + validate_memop(oi, MO_BEUL); + trace_guest_ld_before_exec(env_cpu(env), addr, oi); + haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD); + ret = ldl_be_p(haddr); clear_helper_retaddr(); + qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R); return ret; } -int cpu_ldsw_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr) -{ - int ret; - - set_helper_retaddr(retaddr); - ret = cpu_ldsw_be_data(env, ptr); - clear_helper_retaddr(); - return ret; -} - -uint32_t cpu_ldl_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr) -{ - uint32_t ret; - - set_helper_retaddr(retaddr); - ret = cpu_ldl_be_data(env, ptr); - clear_helper_retaddr(); - return ret; -} - -uint64_t cpu_ldq_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr) +uint64_t cpu_ldq_be_mmu(CPUArchState *env, abi_ptr addr, + MemOpIdx oi, uintptr_t ra) { + void *haddr; uint64_t ret; - set_helper_retaddr(retaddr); - ret = cpu_ldq_be_data(env, ptr); + validate_memop(oi, MO_BEQ); + trace_guest_ld_before_exec(env_cpu(env), addr, oi); + haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD); + ret = ldq_be_p(haddr); clear_helper_retaddr(); + qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R); return ret; } -uint32_t cpu_lduw_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr) +uint16_t cpu_ldw_le_mmu(CPUArchState *env, abi_ptr addr, + MemOpIdx oi, uintptr_t ra) { + void *haddr; + uint16_t ret; + + validate_memop(oi, MO_LEUW); + trace_guest_ld_before_exec(env_cpu(env), addr, oi); + haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD); + ret = lduw_le_p(haddr); + clear_helper_retaddr(); + qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R); + return ret; +} + +uint32_t cpu_ldl_le_mmu(CPUArchState *env, abi_ptr addr, + MemOpIdx oi, uintptr_t ra) +{ + void *haddr; uint32_t ret; - set_helper_retaddr(retaddr); - ret = cpu_lduw_le_data(env, ptr); + validate_memop(oi, MO_LEUL); + trace_guest_ld_before_exec(env_cpu(env), addr, oi); + haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD); + ret = ldl_le_p(haddr); clear_helper_retaddr(); + qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R); return ret; } -int cpu_ldsw_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr) -{ - int ret; - - set_helper_retaddr(retaddr); - ret = cpu_ldsw_le_data(env, ptr); - clear_helper_retaddr(); - return ret; -} - -uint32_t cpu_ldl_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t retaddr) -{ - uint32_t ret; - - set_helper_retaddr(retaddr); - ret = cpu_ldl_le_data(env, ptr); - clear_helper_retaddr(); - return ret; -} - -uint64_t cpu_ldq_le_data_ra(CPUArchState *env, abi_ptr 
ptr, uintptr_t retaddr) +uint64_t cpu_ldq_le_mmu(CPUArchState *env, abi_ptr addr, + MemOpIdx oi, uintptr_t ra) { + void *haddr; uint64_t ret; - set_helper_retaddr(retaddr); - ret = cpu_ldq_le_data(env, ptr); + validate_memop(oi, MO_LEQ); + trace_guest_ld_before_exec(env_cpu(env), addr, oi); + haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD); + ret = ldq_le_p(haddr); clear_helper_retaddr(); + qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R); return ret; } -void cpu_stb_data(CPUArchState *env, abi_ptr ptr, uint32_t val) +void cpu_stb_mmu(CPUArchState *env, abi_ptr addr, uint8_t val, + MemOpIdx oi, uintptr_t ra) { - uint16_t meminfo = trace_mem_get_info(MO_UB, MMU_USER_IDX, true); + void *haddr; - trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo); - stb_p(g2h(env_cpu(env), ptr), val); - qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo); -} - -void cpu_stw_be_data(CPUArchState *env, abi_ptr ptr, uint32_t val) -{ - uint16_t meminfo = trace_mem_get_info(MO_BEUW, MMU_USER_IDX, true); - - trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo); - stw_be_p(g2h(env_cpu(env), ptr), val); - qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo); -} - -void cpu_stl_be_data(CPUArchState *env, abi_ptr ptr, uint32_t val) -{ - uint16_t meminfo = trace_mem_get_info(MO_BEUL, MMU_USER_IDX, true); - - trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo); - stl_be_p(g2h(env_cpu(env), ptr), val); - qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo); -} - -void cpu_stq_be_data(CPUArchState *env, abi_ptr ptr, uint64_t val) -{ - uint16_t meminfo = trace_mem_get_info(MO_BEQ, MMU_USER_IDX, true); - - trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo); - stq_be_p(g2h(env_cpu(env), ptr), val); - qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo); -} - -void cpu_stw_le_data(CPUArchState *env, abi_ptr ptr, uint32_t val) -{ - uint16_t meminfo = trace_mem_get_info(MO_LEUW, MMU_USER_IDX, true); - - trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo); - stw_le_p(g2h(env_cpu(env), ptr), val); - qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo); -} - -void cpu_stl_le_data(CPUArchState *env, abi_ptr ptr, uint32_t val) -{ - uint16_t meminfo = trace_mem_get_info(MO_LEUL, MMU_USER_IDX, true); - - trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo); - stl_le_p(g2h(env_cpu(env), ptr), val); - qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo); -} - -void cpu_stq_le_data(CPUArchState *env, abi_ptr ptr, uint64_t val) -{ - uint16_t meminfo = trace_mem_get_info(MO_LEQ, MMU_USER_IDX, true); - - trace_guest_mem_before_exec(env_cpu(env), ptr, meminfo); - stq_le_p(g2h(env_cpu(env), ptr), val); - qemu_plugin_vcpu_mem_cb(env_cpu(env), ptr, meminfo); -} - -void cpu_stb_data_ra(CPUArchState *env, abi_ptr ptr, - uint32_t val, uintptr_t retaddr) -{ - set_helper_retaddr(retaddr); - cpu_stb_data(env, ptr, val); + validate_memop(oi, MO_UB); + trace_guest_st_before_exec(env_cpu(env), addr, oi); + haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE); + stb_p(haddr, val); clear_helper_retaddr(); + qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W); } -void cpu_stw_be_data_ra(CPUArchState *env, abi_ptr ptr, - uint32_t val, uintptr_t retaddr) +void cpu_stw_be_mmu(CPUArchState *env, abi_ptr addr, uint16_t val, + MemOpIdx oi, uintptr_t ra) { - set_helper_retaddr(retaddr); - cpu_stw_be_data(env, ptr, val); + void *haddr; + + validate_memop(oi, MO_BEUW); + trace_guest_st_before_exec(env_cpu(env), addr, oi); + haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE); + stw_be_p(haddr, 
val); clear_helper_retaddr(); + qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W); } -void cpu_stl_be_data_ra(CPUArchState *env, abi_ptr ptr, - uint32_t val, uintptr_t retaddr) +void cpu_stl_be_mmu(CPUArchState *env, abi_ptr addr, uint32_t val, + MemOpIdx oi, uintptr_t ra) { - set_helper_retaddr(retaddr); - cpu_stl_be_data(env, ptr, val); + void *haddr; + + validate_memop(oi, MO_BEUL); + trace_guest_st_before_exec(env_cpu(env), addr, oi); + haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE); + stl_be_p(haddr, val); clear_helper_retaddr(); + qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W); } -void cpu_stq_be_data_ra(CPUArchState *env, abi_ptr ptr, - uint64_t val, uintptr_t retaddr) +void cpu_stq_be_mmu(CPUArchState *env, abi_ptr addr, uint64_t val, + MemOpIdx oi, uintptr_t ra) { - set_helper_retaddr(retaddr); - cpu_stq_be_data(env, ptr, val); + void *haddr; + + validate_memop(oi, MO_BEQ); + trace_guest_st_before_exec(env_cpu(env), addr, oi); + haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE); + stq_be_p(haddr, val); clear_helper_retaddr(); + qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W); } -void cpu_stw_le_data_ra(CPUArchState *env, abi_ptr ptr, - uint32_t val, uintptr_t retaddr) +void cpu_stw_le_mmu(CPUArchState *env, abi_ptr addr, uint16_t val, + MemOpIdx oi, uintptr_t ra) { - set_helper_retaddr(retaddr); - cpu_stw_le_data(env, ptr, val); + void *haddr; + + validate_memop(oi, MO_LEUW); + trace_guest_st_before_exec(env_cpu(env), addr, oi); + haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE); + stw_le_p(haddr, val); clear_helper_retaddr(); + qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W); } -void cpu_stl_le_data_ra(CPUArchState *env, abi_ptr ptr, - uint32_t val, uintptr_t retaddr) +void cpu_stl_le_mmu(CPUArchState *env, abi_ptr addr, uint32_t val, + MemOpIdx oi, uintptr_t ra) { - set_helper_retaddr(retaddr); - cpu_stl_le_data(env, ptr, val); + void *haddr; + + validate_memop(oi, MO_LEUL); + trace_guest_st_before_exec(env_cpu(env), addr, oi); + haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE); + stl_le_p(haddr, val); clear_helper_retaddr(); + qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W); } -void cpu_stq_le_data_ra(CPUArchState *env, abi_ptr ptr, - uint64_t val, uintptr_t retaddr) +void cpu_stq_le_mmu(CPUArchState *env, abi_ptr addr, uint64_t val, + MemOpIdx oi, uintptr_t ra) { - set_helper_retaddr(retaddr); - cpu_stq_le_data(env, ptr, val); + void *haddr; + + validate_memop(oi, MO_LEQ); + trace_guest_st_before_exec(env_cpu(env), addr, oi); + haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE); + stq_le_p(haddr, val); clear_helper_retaddr(); + qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W); } uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr ptr) @@ -1255,20 +479,33 @@ uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr ptr) return ret; } +#include "ldst_common.c.inc" + /* * Do not allow unaligned operations to proceed. Return the host address. * * @prot may be PAGE_READ, PAGE_WRITE, or PAGE_READ|PAGE_WRITE. */ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr, - TCGMemOpIdx oi, int size, int prot, + MemOpIdx oi, int size, int prot, uintptr_t retaddr) { + MemOp mop = get_memop(oi); + int a_bits = get_alignment_bits(mop); + void *ret; + + /* Enforce guest required alignment. */ + if (unlikely(addr & ((1 << a_bits) - 1))) { + MMUAccessType t = prot == PAGE_READ ? 
MMU_DATA_LOAD : MMU_DATA_STORE; + cpu_loop_exit_sigbus(env_cpu(env), addr, t, retaddr); + } + /* Enforce qemu required alignment. */ if (unlikely(addr & (size - 1))) { cpu_loop_exit_atomic(env_cpu(env), retaddr); } - void *ret = g2h(env_cpu(env), addr); + + ret = g2h(env_cpu(env), addr); set_helper_retaddr(retaddr); return ret; } diff --git a/audio/meson.build b/audio/meson.build index 7d53b0f920..462533bb8c 100644 --- a/audio/meson.build +++ b/audio/meson.build @@ -7,23 +7,22 @@ softmmu_ss.add(files( 'wavcapture.c', )) -softmmu_ss.add(when: [coreaudio, 'CONFIG_AUDIO_COREAUDIO'], if_true: files('coreaudio.c')) -softmmu_ss.add(when: [dsound, 'CONFIG_AUDIO_DSOUND'], if_true: files('dsoundaudio.c')) -softmmu_ss.add(when: ['CONFIG_AUDIO_WIN_INT'], if_true: files('audio_win_int.c')) +softmmu_ss.add(when: coreaudio, if_true: files('coreaudio.c')) +softmmu_ss.add(when: dsound, if_true: files('dsoundaudio.c', 'audio_win_int.c')) audio_modules = {} foreach m : [ - ['CONFIG_AUDIO_ALSA', 'alsa', alsa, 'alsaaudio.c'], - ['CONFIG_AUDIO_OSS', 'oss', oss, 'ossaudio.c'], - ['CONFIG_AUDIO_PA', 'pa', pulse, 'paaudio.c'], - ['CONFIG_AUDIO_SDL', 'sdl', sdl, 'sdlaudio.c'], - ['CONFIG_AUDIO_JACK', 'jack', jack, 'jackaudio.c'], - ['CONFIG_SPICE', 'spice', spice, 'spiceaudio.c'] + ['alsa', alsa, files('alsaaudio.c')], + ['oss', oss, files('ossaudio.c')], + ['pa', pulse, files('paaudio.c')], + ['sdl', sdl, files('sdlaudio.c')], + ['jack', jack, files('jackaudio.c')], + ['spice', spice, files('spiceaudio.c')] ] - if config_host.has_key(m[0]) + if m[1].found() module_ss = ss.source_set() - module_ss.add(when: m[2], if_true: files(m[3])) - audio_modules += {m[1] : module_ss} + module_ss.add(m[1], m[2]) + audio_modules += {m[0] : module_ss} endif endforeach diff --git a/backends/hostmem-epc.c b/backends/hostmem-epc.c new file mode 100644 index 0000000000..b47f98b6a3 --- /dev/null +++ b/backends/hostmem-epc.c @@ -0,0 +1,82 @@ +/* + * QEMU host SGX EPC memory backend + * + * Copyright (C) 2019 Intel Corporation + * + * Authors: + * Sean Christopherson + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ +#include + +#include "qemu/osdep.h" +#include "qemu-common.h" +#include "qom/object_interfaces.h" +#include "qapi/error.h" +#include "sysemu/hostmem.h" +#include "hw/i386/hostmem-epc.h" + +static void +sgx_epc_backend_memory_alloc(HostMemoryBackend *backend, Error **errp) +{ + uint32_t ram_flags; + char *name; + int fd; + + if (!backend->size) { + error_setg(errp, "can't create backend with size 0"); + return; + } + + fd = qemu_open_old("/dev/sgx_vepc", O_RDWR); + if (fd < 0) { + error_setg_errno(errp, errno, + "failed to open /dev/sgx_vepc to alloc SGX EPC"); + return; + } + + name = object_get_canonical_path(OBJECT(backend)); + ram_flags = (backend->share ? 
RAM_SHARED : 0) | RAM_PROTECTED; + memory_region_init_ram_from_fd(&backend->mr, OBJECT(backend), + name, backend->size, ram_flags, + fd, 0, errp); + g_free(name); +} + +static void sgx_epc_backend_instance_init(Object *obj) +{ + HostMemoryBackend *m = MEMORY_BACKEND(obj); + + m->share = true; + m->merge = false; + m->dump = false; +} + +static void sgx_epc_backend_class_init(ObjectClass *oc, void *data) +{ + HostMemoryBackendClass *bc = MEMORY_BACKEND_CLASS(oc); + + bc->alloc = sgx_epc_backend_memory_alloc; +} + +static const TypeInfo sgx_epc_backed_info = { + .name = TYPE_MEMORY_BACKEND_EPC, + .parent = TYPE_MEMORY_BACKEND, + .instance_init = sgx_epc_backend_instance_init, + .class_init = sgx_epc_backend_class_init, + .instance_size = sizeof(HostMemoryBackendEpc), +}; + +static void register_types(void) +{ + int fd = qemu_open_old("/dev/sgx_vepc", O_RDWR); + if (fd >= 0) { + close(fd); + + type_register_static(&sgx_epc_backed_info); + } +} + +type_init(register_types); diff --git a/backends/meson.build b/backends/meson.build index d4221831fc..6e68945528 100644 --- a/backends/meson.build +++ b/backends/meson.build @@ -16,5 +16,6 @@ softmmu_ss.add(when: ['CONFIG_VHOST_USER', 'CONFIG_VIRTIO'], if_true: files('vho softmmu_ss.add(when: 'CONFIG_VIRTIO_CRYPTO', if_true: files('cryptodev-vhost.c')) softmmu_ss.add(when: ['CONFIG_VIRTIO_CRYPTO', 'CONFIG_VHOST_CRYPTO'], if_true: files('cryptodev-vhost-user.c')) softmmu_ss.add(when: 'CONFIG_GIO', if_true: [files('dbus-vmstate.c'), gio]) +softmmu_ss.add(when: 'CONFIG_SGX', if_true: files('hostmem-epc.c')) subdir('tpm') diff --git a/block.c b/block.c index 5ce08a79fd..580cb77a70 100644 --- a/block.c +++ b/block.c @@ -84,6 +84,9 @@ static BlockDriverState *bdrv_open_inherit(const char *filename, BdrvChildRole child_role, Error **errp); +static bool bdrv_recurse_has_child(BlockDriverState *bs, + BlockDriverState *child); + static void bdrv_replace_child_noperm(BdrvChild *child, BlockDriverState *new_bs); static void bdrv_remove_file_or_backing_child(BlockDriverState *bs, @@ -1604,16 +1607,26 @@ open_failed: return ret; } -BlockDriverState *bdrv_new_open_driver(BlockDriver *drv, const char *node_name, - int flags, Error **errp) +/* + * Create and open a block node. + * + * @options is a QDict of options to pass to the block drivers, or NULL for an + * empty set of options. The reference to the QDict belongs to the block layer + * after the call (even on failure), so if the caller intends to reuse the + * dictionary, it needs to use qobject_ref() before calling bdrv_open. + */ +BlockDriverState *bdrv_new_open_driver_opts(BlockDriver *drv, + const char *node_name, + QDict *options, int flags, + Error **errp) { BlockDriverState *bs; int ret; bs = bdrv_new(); bs->open_flags = flags; - bs->explicit_options = qdict_new(); - bs->options = qdict_new(); + bs->options = options ?: qdict_new(); + bs->explicit_options = qdict_clone_shallow(bs->options); bs->opaque = NULL; update_options_from_flags(bs->options, flags); @@ -1631,6 +1644,13 @@ BlockDriverState *bdrv_new_open_driver(BlockDriver *drv, const char *node_name, return bs; } +/* Create and open a block node. 
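This convenience wrapper forwards to bdrv_new_open_driver_opts() with a NULL options dict, so callers that need to pass options, or to keep a reference to them, should call the _opts variant directly.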
*/ +BlockDriverState *bdrv_new_open_driver(BlockDriver *drv, const char *node_name, + int flags, Error **errp) +{ + return bdrv_new_open_driver_opts(drv, node_name, NULL, flags, errp); +} + QemuOptsList bdrv_runtime_opts = { .name = "bdrv_common", .head = QTAILQ_HEAD_INITIALIZER(bdrv_runtime_opts.head), @@ -2656,6 +2676,7 @@ static void bdrv_replace_child_noperm(BdrvChild *child, int drain_saldo; assert(!child->frozen); + assert(old_bs != new_bs); if (old_bs && new_bs) { assert(bdrv_get_aio_context(old_bs) == bdrv_get_aio_context(new_bs)); @@ -2875,6 +2896,12 @@ static int bdrv_attach_child_noperm(BlockDriverState *parent_bs, assert(parent_bs->drv); + if (bdrv_recurse_has_child(child_bs, parent_bs)) { + error_setg(errp, "Making '%s' a %s child of '%s' would create a cycle", + child_bs->node_name, child_name, parent_bs->node_name); + return -EINVAL; + } + bdrv_get_cumulative_perm(parent_bs, &perm, &shared_perm); bdrv_child_perm(parent_bs, child_bs, NULL, child_role, NULL, perm, shared_perm, &perm, &shared_perm); @@ -5102,29 +5129,61 @@ static void bdrv_delete(BlockDriverState *bs) g_free(bs); } -BlockDriverState *bdrv_insert_node(BlockDriverState *bs, QDict *node_options, + +/* + * Replace @bs by newly created block node. + * + * @options is a QDict of options to pass to the block drivers, or NULL for an + * empty set of options. The reference to the QDict belongs to the block layer + * after the call (even on failure), so if the caller intends to reuse the + * dictionary, it needs to use qobject_ref() before calling bdrv_open. + */ +BlockDriverState *bdrv_insert_node(BlockDriverState *bs, QDict *options, int flags, Error **errp) { - BlockDriverState *new_node_bs; - Error *local_err = NULL; + ERRP_GUARD(); + int ret; + BlockDriverState *new_node_bs = NULL; + const char *drvname, *node_name; + BlockDriver *drv; - new_node_bs = bdrv_open(NULL, NULL, node_options, flags, errp); - if (new_node_bs == NULL) { + drvname = qdict_get_try_str(options, "driver"); + if (!drvname) { + error_setg(errp, "driver is not specified"); + goto fail; + } + + drv = bdrv_find_format(drvname); + if (!drv) { + error_setg(errp, "Unknown driver: '%s'", drvname); + goto fail; + } + + node_name = qdict_get_try_str(options, "node-name"); + + new_node_bs = bdrv_new_open_driver_opts(drv, node_name, options, flags, + errp); + options = NULL; /* bdrv_new_open_driver() eats options */ + if (!new_node_bs) { error_prepend(errp, "Could not create node: "); - return NULL; + goto fail; } bdrv_drained_begin(bs); - bdrv_replace_node(bs, new_node_bs, &local_err); + ret = bdrv_replace_node(bs, new_node_bs, errp); bdrv_drained_end(bs); - if (local_err) { - bdrv_unref(new_node_bs); - error_propagate(errp, local_err); - return NULL; + if (ret < 0) { + error_prepend(errp, "Could not replace node: "); + goto fail; } return new_node_bs; + +fail: + qobject_unref(options); + bdrv_unref(new_node_bs); + return NULL; } /* diff --git a/block/aio_task.c b/block/aio_task.c index 88989fa248..9bd17ea2c1 100644 --- a/block/aio_task.c +++ b/block/aio_task.c @@ -98,6 +98,8 @@ AioTaskPool *coroutine_fn aio_task_pool_new(int max_busy_tasks) { AioTaskPool *pool = g_new0(AioTaskPool, 1); + assert(max_busy_tasks > 0); + pool->main_co = qemu_coroutine_self(); pool->max_busy_tasks = max_busy_tasks; diff --git a/block/backup.c b/block/backup.c index 687d2882bc..21d5983779 100644 --- a/block/backup.c +++ b/block/backup.c @@ -327,11 +327,12 @@ static void coroutine_fn backup_set_speed(BlockJob *job, int64_t speed) } } -static void backup_cancel(Job *job, bool 
force) +static bool backup_cancel(Job *job, bool force) { BackupBlockJob *s = container_of(job, BackupBlockJob, common.job); bdrv_cancel_in_flight(s->target_bs); + return true; } static const BlockJobDriver backup_job_driver = { @@ -407,8 +408,8 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs, return NULL; } - if (perf->max_workers < 1) { - error_setg(errp, "max-workers must be greater than zero"); + if (perf->max_workers < 1 || perf->max_workers > INT_MAX) { + error_setg(errp, "max-workers must be between 1 and %d", INT_MAX); return NULL; } diff --git a/block/blkdebug.c b/block/blkdebug.c index 8b67554bec..bbf2948703 100644 --- a/block/blkdebug.c +++ b/block/blkdebug.c @@ -631,8 +631,8 @@ static int rule_check(BlockDriverState *bs, uint64_t offset, uint64_t bytes, } static int coroutine_fn -blkdebug_co_preadv(BlockDriverState *bs, uint64_t offset, uint64_t bytes, - QEMUIOVector *qiov, int flags) +blkdebug_co_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes, + QEMUIOVector *qiov, BdrvRequestFlags flags) { int err; @@ -652,8 +652,8 @@ blkdebug_co_preadv(BlockDriverState *bs, uint64_t offset, uint64_t bytes, } static int coroutine_fn -blkdebug_co_pwritev(BlockDriverState *bs, uint64_t offset, uint64_t bytes, - QEMUIOVector *qiov, int flags) +blkdebug_co_pwritev(BlockDriverState *bs, int64_t offset, int64_t bytes, + QEMUIOVector *qiov, BdrvRequestFlags flags) { int err; @@ -684,7 +684,7 @@ static int blkdebug_co_flush(BlockDriverState *bs) } static int coroutine_fn blkdebug_co_pwrite_zeroes(BlockDriverState *bs, - int64_t offset, int bytes, + int64_t offset, int64_t bytes, BdrvRequestFlags flags) { uint32_t align = MAX(bs->bl.request_alignment, @@ -717,7 +717,7 @@ static int coroutine_fn blkdebug_co_pwrite_zeroes(BlockDriverState *bs, } static int coroutine_fn blkdebug_co_pdiscard(BlockDriverState *bs, - int64_t offset, int bytes) + int64_t offset, int64_t bytes) { uint32_t align = bs->bl.pdiscard_alignment; int err; diff --git a/block/blklogwrites.c b/block/blklogwrites.c index b7579370a3..f7a251e91f 100644 --- a/block/blklogwrites.c +++ b/block/blklogwrites.c @@ -301,8 +301,8 @@ static void blk_log_writes_refresh_limits(BlockDriverState *bs, Error **errp) } static int coroutine_fn -blk_log_writes_co_preadv(BlockDriverState *bs, uint64_t offset, uint64_t bytes, - QEMUIOVector *qiov, int flags) +blk_log_writes_co_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes, + QEMUIOVector *qiov, BdrvRequestFlags flags) { return bdrv_co_preadv(bs->file, offset, bytes, qiov, flags); } @@ -460,16 +460,16 @@ blk_log_writes_co_do_file_pdiscard(BlkLogWritesFileReq *fr) } static int coroutine_fn -blk_log_writes_co_pwritev(BlockDriverState *bs, uint64_t offset, uint64_t bytes, - QEMUIOVector *qiov, int flags) +blk_log_writes_co_pwritev(BlockDriverState *bs, int64_t offset, int64_t bytes, + QEMUIOVector *qiov, BdrvRequestFlags flags) { return blk_log_writes_co_log(bs, offset, bytes, qiov, flags, blk_log_writes_co_do_file_pwritev, 0, false); } static int coroutine_fn -blk_log_writes_co_pwrite_zeroes(BlockDriverState *bs, int64_t offset, int bytes, - BdrvRequestFlags flags) +blk_log_writes_co_pwrite_zeroes(BlockDriverState *bs, int64_t offset, + int64_t bytes, BdrvRequestFlags flags) { return blk_log_writes_co_log(bs, offset, bytes, NULL, flags, blk_log_writes_co_do_file_pwrite_zeroes, 0, @@ -484,9 +484,9 @@ static int coroutine_fn blk_log_writes_co_flush_to_disk(BlockDriverState *bs) } static int coroutine_fn -blk_log_writes_co_pdiscard(BlockDriverState *bs, int64_t 
offset, int count) +blk_log_writes_co_pdiscard(BlockDriverState *bs, int64_t offset, int64_t bytes) { - return blk_log_writes_co_log(bs, offset, count, NULL, 0, + return blk_log_writes_co_log(bs, offset, bytes, NULL, 0, blk_log_writes_co_do_file_pdiscard, LOG_DISCARD_FLAG, false); } diff --git a/block/blkreplay.c b/block/blkreplay.c index 4a247752fd..dcbe780ddb 100644 --- a/block/blkreplay.c +++ b/block/blkreplay.c @@ -72,7 +72,7 @@ static void block_request_create(uint64_t reqid, BlockDriverState *bs, } static int coroutine_fn blkreplay_co_preadv(BlockDriverState *bs, - uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags) + int64_t offset, int64_t bytes, QEMUIOVector *qiov, BdrvRequestFlags flags) { uint64_t reqid = blkreplay_next_id(); int ret = bdrv_co_preadv(bs->file, offset, bytes, qiov, flags); @@ -83,7 +83,7 @@ static int coroutine_fn blkreplay_co_preadv(BlockDriverState *bs, } static int coroutine_fn blkreplay_co_pwritev(BlockDriverState *bs, - uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags) + int64_t offset, int64_t bytes, QEMUIOVector *qiov, BdrvRequestFlags flags) { uint64_t reqid = blkreplay_next_id(); int ret = bdrv_co_pwritev(bs->file, offset, bytes, qiov, flags); @@ -94,7 +94,7 @@ static int coroutine_fn blkreplay_co_pwritev(BlockDriverState *bs, } static int coroutine_fn blkreplay_co_pwrite_zeroes(BlockDriverState *bs, - int64_t offset, int bytes, BdrvRequestFlags flags) + int64_t offset, int64_t bytes, BdrvRequestFlags flags) { uint64_t reqid = blkreplay_next_id(); int ret = bdrv_co_pwrite_zeroes(bs->file, offset, bytes, flags); @@ -105,7 +105,7 @@ static int coroutine_fn blkreplay_co_pwrite_zeroes(BlockDriverState *bs, } static int coroutine_fn blkreplay_co_pdiscard(BlockDriverState *bs, - int64_t offset, int bytes) + int64_t offset, int64_t bytes) { uint64_t reqid = blkreplay_next_id(); int ret = bdrv_co_pdiscard(bs->file, offset, bytes); diff --git a/block/blkverify.c b/block/blkverify.c index 188d7632fa..d1facf5ba9 100644 --- a/block/blkverify.c +++ b/block/blkverify.c @@ -221,8 +221,8 @@ blkverify_co_prwv(BlockDriverState *bs, BlkverifyRequest *r, uint64_t offset, } static int coroutine_fn -blkverify_co_preadv(BlockDriverState *bs, uint64_t offset, uint64_t bytes, - QEMUIOVector *qiov, int flags) +blkverify_co_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes, + QEMUIOVector *qiov, BdrvRequestFlags flags) { BlkverifyRequest r; QEMUIOVector raw_qiov; @@ -250,8 +250,8 @@ blkverify_co_preadv(BlockDriverState *bs, uint64_t offset, uint64_t bytes, } static int coroutine_fn -blkverify_co_pwritev(BlockDriverState *bs, uint64_t offset, uint64_t bytes, - QEMUIOVector *qiov, int flags) +blkverify_co_pwritev(BlockDriverState *bs, int64_t offset, int64_t bytes, + QEMUIOVector *qiov, BdrvRequestFlags flags) { BlkverifyRequest r; return blkverify_co_prwv(bs, &r, offset, bytes, qiov, qiov, flags, true); diff --git a/block/block-backend.c b/block/block-backend.c index 6140d133e2..12ef80ea17 100644 --- a/block/block-backend.c +++ b/block/block-backend.c @@ -14,6 +14,7 @@ #include "sysemu/block-backend.h" #include "block/block_int.h" #include "block/blockjob.h" +#include "block/coroutines.h" #include "block/throttle-groups.h" #include "hw/qdev-core.h" #include "sysemu/blockdev.h" @@ -1161,11 +1162,11 @@ void blk_set_disable_request_queuing(BlockBackend *blk, bool disable) } static int blk_check_byte_request(BlockBackend *blk, int64_t offset, - size_t size) + int64_t bytes) { int64_t len; - if (size > INT_MAX) { + if (bytes < 0) { return -EIO; } @@ 
-1183,7 +1184,7 @@ static int blk_check_byte_request(BlockBackend *blk, int64_t offset, return len; } - if (offset > len || len - offset < size) { + if (offset > len || len - offset < bytes) { return -EIO; } } @@ -1204,9 +1205,9 @@ static void coroutine_fn blk_wait_while_drained(BlockBackend *blk) } /* To be called between exactly one pair of blk_inc/dec_in_flight() */ -static int coroutine_fn -blk_do_preadv(BlockBackend *blk, int64_t offset, unsigned int bytes, - QEMUIOVector *qiov, BdrvRequestFlags flags) +int coroutine_fn +blk_co_do_preadv(BlockBackend *blk, int64_t offset, int64_t bytes, + QEMUIOVector *qiov, BdrvRequestFlags flags) { int ret; BlockDriverState *bs; @@ -1236,23 +1237,23 @@ blk_do_preadv(BlockBackend *blk, int64_t offset, unsigned int bytes, } int coroutine_fn blk_co_preadv(BlockBackend *blk, int64_t offset, - unsigned int bytes, QEMUIOVector *qiov, + int64_t bytes, QEMUIOVector *qiov, BdrvRequestFlags flags) { int ret; blk_inc_in_flight(blk); - ret = blk_do_preadv(blk, offset, bytes, qiov, flags); + ret = blk_co_do_preadv(blk, offset, bytes, qiov, flags); blk_dec_in_flight(blk); return ret; } /* To be called between exactly one pair of blk_inc/dec_in_flight() */ -static int coroutine_fn -blk_do_pwritev_part(BlockBackend *blk, int64_t offset, unsigned int bytes, - QEMUIOVector *qiov, size_t qiov_offset, - BdrvRequestFlags flags) +int coroutine_fn +blk_co_do_pwritev_part(BlockBackend *blk, int64_t offset, int64_t bytes, + QEMUIOVector *qiov, size_t qiov_offset, + BdrvRequestFlags flags) { int ret; BlockDriverState *bs; @@ -1286,12 +1287,33 @@ blk_do_pwritev_part(BlockBackend *blk, int64_t offset, unsigned int bytes, } int coroutine_fn blk_co_pwritev_part(BlockBackend *blk, int64_t offset, - unsigned int bytes, + int64_t bytes, QEMUIOVector *qiov, size_t qiov_offset, BdrvRequestFlags flags) { int ret; + blk_inc_in_flight(blk); + ret = blk_co_do_pwritev_part(blk, offset, bytes, qiov, qiov_offset, flags); + blk_dec_in_flight(blk); + + return ret; +} + +int coroutine_fn blk_co_pwritev(BlockBackend *blk, int64_t offset, + int64_t bytes, QEMUIOVector *qiov, + BdrvRequestFlags flags) +{ + return blk_co_pwritev_part(blk, offset, bytes, qiov, 0, flags); +} + +static int coroutine_fn blk_pwritev_part(BlockBackend *blk, int64_t offset, + int64_t bytes, + QEMUIOVector *qiov, size_t qiov_offset, + BdrvRequestFlags flags) +{ + int ret; + blk_inc_in_flight(blk); ret = blk_do_pwritev_part(blk, offset, bytes, qiov, qiov_offset, flags); blk_dec_in_flight(blk); @@ -1299,13 +1321,6 @@ int coroutine_fn blk_co_pwritev_part(BlockBackend *blk, int64_t offset, return ret; } -int coroutine_fn blk_co_pwritev(BlockBackend *blk, int64_t offset, - unsigned int bytes, QEMUIOVector *qiov, - BdrvRequestFlags flags) -{ - return blk_co_pwritev_part(blk, offset, bytes, qiov, 0, flags); -} - typedef struct BlkRwCo { BlockBackend *blk; int64_t offset; @@ -1314,58 +1329,11 @@ typedef struct BlkRwCo { BdrvRequestFlags flags; } BlkRwCo; -static void blk_read_entry(void *opaque) -{ - BlkRwCo *rwco = opaque; - QEMUIOVector *qiov = rwco->iobuf; - - rwco->ret = blk_do_preadv(rwco->blk, rwco->offset, qiov->size, - qiov, rwco->flags); - aio_wait_kick(); -} - -static void blk_write_entry(void *opaque) -{ - BlkRwCo *rwco = opaque; - QEMUIOVector *qiov = rwco->iobuf; - - rwco->ret = blk_do_pwritev_part(rwco->blk, rwco->offset, qiov->size, - qiov, 0, rwco->flags); - aio_wait_kick(); -} - -static int blk_prw(BlockBackend *blk, int64_t offset, uint8_t *buf, - int64_t bytes, CoroutineEntry co_entry, - BdrvRequestFlags flags) 
-{ - QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes); - BlkRwCo rwco = { - .blk = blk, - .offset = offset, - .iobuf = &qiov, - .flags = flags, - .ret = NOT_DONE, - }; - - blk_inc_in_flight(blk); - if (qemu_in_coroutine()) { - /* Fast-path if already in coroutine context */ - co_entry(&rwco); - } else { - Coroutine *co = qemu_coroutine_create(co_entry, &rwco); - bdrv_coroutine_enter(blk_bs(blk), co); - BDRV_POLL_WHILE(blk_bs(blk), rwco.ret == NOT_DONE); - } - blk_dec_in_flight(blk); - - return rwco.ret; -} - int blk_pwrite_zeroes(BlockBackend *blk, int64_t offset, - int bytes, BdrvRequestFlags flags) + int64_t bytes, BdrvRequestFlags flags) { - return blk_prw(blk, offset, NULL, bytes, blk_write_entry, - flags | BDRV_REQ_ZERO_WRITE); + return blk_pwritev_part(blk, offset, bytes, NULL, 0, + flags | BDRV_REQ_ZERO_WRITE); } int blk_make_zero(BlockBackend *blk, BdrvRequestFlags flags) @@ -1412,7 +1380,7 @@ BlockAIOCB *blk_abort_aio_request(BlockBackend *blk, typedef struct BlkAioEmAIOCB { BlockAIOCB common; BlkRwCo rwco; - int bytes; + int64_t bytes; bool has_returned; } BlkAioEmAIOCB; @@ -1444,7 +1412,8 @@ static void blk_aio_complete_bh(void *opaque) blk_aio_complete(acb); } -static BlockAIOCB *blk_aio_prwv(BlockBackend *blk, int64_t offset, int bytes, +static BlockAIOCB *blk_aio_prwv(BlockBackend *blk, int64_t offset, + int64_t bytes, void *iobuf, CoroutineEntry co_entry, BdrvRequestFlags flags, BlockCompletionFunc *cb, void *opaque) @@ -1483,8 +1452,8 @@ static void blk_aio_read_entry(void *opaque) QEMUIOVector *qiov = rwco->iobuf; assert(qiov->size == acb->bytes); - rwco->ret = blk_do_preadv(rwco->blk, rwco->offset, acb->bytes, - qiov, rwco->flags); + rwco->ret = blk_co_do_preadv(rwco->blk, rwco->offset, acb->bytes, + qiov, rwco->flags); blk_aio_complete(acb); } @@ -1495,37 +1464,40 @@ static void blk_aio_write_entry(void *opaque) QEMUIOVector *qiov = rwco->iobuf; assert(!qiov || qiov->size == acb->bytes); - rwco->ret = blk_do_pwritev_part(rwco->blk, rwco->offset, acb->bytes, - qiov, 0, rwco->flags); + rwco->ret = blk_co_do_pwritev_part(rwco->blk, rwco->offset, acb->bytes, + qiov, 0, rwco->flags); blk_aio_complete(acb); } BlockAIOCB *blk_aio_pwrite_zeroes(BlockBackend *blk, int64_t offset, - int count, BdrvRequestFlags flags, + int64_t bytes, BdrvRequestFlags flags, BlockCompletionFunc *cb, void *opaque) { - return blk_aio_prwv(blk, offset, count, NULL, blk_aio_write_entry, + return blk_aio_prwv(blk, offset, bytes, NULL, blk_aio_write_entry, flags | BDRV_REQ_ZERO_WRITE, cb, opaque); } -int blk_pread(BlockBackend *blk, int64_t offset, void *buf, int count) +int blk_pread(BlockBackend *blk, int64_t offset, void *buf, int bytes) { - int ret = blk_prw(blk, offset, buf, count, blk_read_entry, 0); - if (ret < 0) { - return ret; - } - return count; + int ret; + QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes); + + blk_inc_in_flight(blk); + ret = blk_do_preadv(blk, offset, bytes, &qiov, 0); + blk_dec_in_flight(blk); + + return ret < 0 ? ret : bytes; } -int blk_pwrite(BlockBackend *blk, int64_t offset, const void *buf, int count, +int blk_pwrite(BlockBackend *blk, int64_t offset, const void *buf, int bytes, BdrvRequestFlags flags) { - int ret = blk_prw(blk, offset, (void *) buf, count, blk_write_entry, - flags); - if (ret < 0) { - return ret; - } - return count; + int ret; + QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes); + + ret = blk_pwritev_part(blk, offset, bytes, &qiov, 0, flags); + + return ret < 0 ? 
ret : bytes; } int64_t blk_getlength(BlockBackend *blk) @@ -1559,6 +1531,7 @@ BlockAIOCB *blk_aio_preadv(BlockBackend *blk, int64_t offset, QEMUIOVector *qiov, BdrvRequestFlags flags, BlockCompletionFunc *cb, void *opaque) { + assert((uint64_t)qiov->size <= INT64_MAX); return blk_aio_prwv(blk, offset, qiov->size, qiov, blk_aio_read_entry, flags, cb, opaque); } @@ -1567,6 +1540,7 @@ BlockAIOCB *blk_aio_pwritev(BlockBackend *blk, int64_t offset, QEMUIOVector *qiov, BdrvRequestFlags flags, BlockCompletionFunc *cb, void *opaque) { + assert((uint64_t)qiov->size <= INT64_MAX); return blk_aio_prwv(blk, offset, qiov->size, qiov, blk_aio_write_entry, flags, cb, opaque); } @@ -1582,8 +1556,8 @@ void blk_aio_cancel_async(BlockAIOCB *acb) } /* To be called between exactly one pair of blk_inc/dec_in_flight() */ -static int coroutine_fn -blk_do_ioctl(BlockBackend *blk, unsigned long int req, void *buf) +int coroutine_fn +blk_co_do_ioctl(BlockBackend *blk, unsigned long int req, void *buf) { blk_wait_while_drained(blk); @@ -1594,18 +1568,15 @@ blk_do_ioctl(BlockBackend *blk, unsigned long int req, void *buf) return bdrv_co_ioctl(blk_bs(blk), req, buf); } -static void blk_ioctl_entry(void *opaque) -{ - BlkRwCo *rwco = opaque; - QEMUIOVector *qiov = rwco->iobuf; - - rwco->ret = blk_do_ioctl(rwco->blk, rwco->offset, qiov->iov[0].iov_base); - aio_wait_kick(); -} - int blk_ioctl(BlockBackend *blk, unsigned long int req, void *buf) { - return blk_prw(blk, req, buf, 0, blk_ioctl_entry, 0); + int ret; + + blk_inc_in_flight(blk); + ret = blk_do_ioctl(blk, req, buf); + blk_dec_in_flight(blk); + + return ret; } static void blk_aio_ioctl_entry(void *opaque) @@ -1613,7 +1584,7 @@ static void blk_aio_ioctl_entry(void *opaque) BlkAioEmAIOCB *acb = opaque; BlkRwCo *rwco = &acb->rwco; - rwco->ret = blk_do_ioctl(rwco->blk, rwco->offset, rwco->iobuf); + rwco->ret = blk_co_do_ioctl(rwco->blk, rwco->offset, rwco->iobuf); blk_aio_complete(acb); } @@ -1625,8 +1596,8 @@ BlockAIOCB *blk_aio_ioctl(BlockBackend *blk, unsigned long int req, void *buf, } /* To be called between exactly one pair of blk_inc/dec_in_flight() */ -static int coroutine_fn -blk_do_pdiscard(BlockBackend *blk, int64_t offset, int bytes) +int coroutine_fn +blk_co_do_pdiscard(BlockBackend *blk, int64_t offset, int64_t bytes) { int ret; @@ -1645,19 +1616,31 @@ static void blk_aio_pdiscard_entry(void *opaque) BlkAioEmAIOCB *acb = opaque; BlkRwCo *rwco = &acb->rwco; - rwco->ret = blk_do_pdiscard(rwco->blk, rwco->offset, acb->bytes); + rwco->ret = blk_co_do_pdiscard(rwco->blk, rwco->offset, acb->bytes); blk_aio_complete(acb); } BlockAIOCB *blk_aio_pdiscard(BlockBackend *blk, - int64_t offset, int bytes, + int64_t offset, int64_t bytes, BlockCompletionFunc *cb, void *opaque) { return blk_aio_prwv(blk, offset, bytes, NULL, blk_aio_pdiscard_entry, 0, cb, opaque); } -int coroutine_fn blk_co_pdiscard(BlockBackend *blk, int64_t offset, int bytes) +int coroutine_fn blk_co_pdiscard(BlockBackend *blk, int64_t offset, + int64_t bytes) +{ + int ret; + + blk_inc_in_flight(blk); + ret = blk_co_do_pdiscard(blk, offset, bytes); + blk_dec_in_flight(blk); + + return ret; +} + +int blk_pdiscard(BlockBackend *blk, int64_t offset, int64_t bytes) { int ret; @@ -1668,22 +1651,8 @@ int coroutine_fn blk_co_pdiscard(BlockBackend *blk, int64_t offset, int bytes) return ret; } -static void blk_pdiscard_entry(void *opaque) -{ - BlkRwCo *rwco = opaque; - QEMUIOVector *qiov = rwco->iobuf; - - rwco->ret = blk_do_pdiscard(rwco->blk, rwco->offset, qiov->size); - aio_wait_kick(); -} - -int 
blk_pdiscard(BlockBackend *blk, int64_t offset, int bytes) -{ - return blk_prw(blk, offset, NULL, bytes, blk_pdiscard_entry, 0); -} - /* To be called between exactly one pair of blk_inc/dec_in_flight() */ -static int coroutine_fn blk_do_flush(BlockBackend *blk) +int coroutine_fn blk_co_do_flush(BlockBackend *blk) { blk_wait_while_drained(blk); @@ -1699,7 +1668,7 @@ static void blk_aio_flush_entry(void *opaque) BlkAioEmAIOCB *acb = opaque; BlkRwCo *rwco = &acb->rwco; - rwco->ret = blk_do_flush(rwco->blk); + rwco->ret = blk_co_do_flush(rwco->blk); blk_aio_complete(acb); } @@ -1714,22 +1683,21 @@ int coroutine_fn blk_co_flush(BlockBackend *blk) int ret; blk_inc_in_flight(blk); - ret = blk_do_flush(blk); + ret = blk_co_do_flush(blk); blk_dec_in_flight(blk); return ret; } -static void blk_flush_entry(void *opaque) -{ - BlkRwCo *rwco = opaque; - rwco->ret = blk_do_flush(rwco->blk); - aio_wait_kick(); -} - int blk_flush(BlockBackend *blk) { - return blk_prw(blk, 0, NULL, 0, blk_flush_entry, 0); + int ret; + + blk_inc_in_flight(blk); + ret = blk_do_flush(blk); + blk_dec_in_flight(blk); + + return ret; } void blk_drain(BlockBackend *blk) @@ -1986,6 +1954,12 @@ uint32_t blk_get_max_transfer(BlockBackend *blk) return ROUND_DOWN(max, blk_get_request_alignment(blk)); } +int blk_get_max_hw_iov(BlockBackend *blk) +{ + return MIN_NON_ZERO(blk->root->bs->bl.max_hw_iov, + blk->root->bs->bl.max_iov); +} + int blk_get_max_iov(BlockBackend *blk) { return blk->root->bs->bl.max_iov; @@ -2208,17 +2182,18 @@ void *blk_aio_get(const AIOCBInfo *aiocb_info, BlockBackend *blk, } int coroutine_fn blk_co_pwrite_zeroes(BlockBackend *blk, int64_t offset, - int bytes, BdrvRequestFlags flags) + int64_t bytes, BdrvRequestFlags flags) { return blk_co_pwritev(blk, offset, bytes, NULL, flags | BDRV_REQ_ZERO_WRITE); } int blk_pwrite_compressed(BlockBackend *blk, int64_t offset, const void *buf, - int count) + int64_t bytes) { - return blk_prw(blk, offset, (void *) buf, count, blk_write_entry, - BDRV_REQ_WRITE_COMPRESSED); + QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes); + return blk_pwritev_part(blk, offset, bytes, &qiov, 0, + BDRV_REQ_WRITE_COMPRESSED); } int blk_truncate(BlockBackend *blk, int64_t offset, bool exact, @@ -2446,7 +2421,7 @@ void blk_unregister_buf(BlockBackend *blk, void *host) int coroutine_fn blk_co_copy_range(BlockBackend *blk_in, int64_t off_in, BlockBackend *blk_out, int64_t off_out, - int bytes, BdrvRequestFlags read_flags, + int64_t bytes, BdrvRequestFlags read_flags, BdrvRequestFlags write_flags) { int r; diff --git a/block/bochs.c b/block/bochs.c index 2f010ab40a..4d68658087 100644 --- a/block/bochs.c +++ b/block/bochs.c @@ -238,8 +238,8 @@ static int64_t seek_to_sector(BlockDriverState *bs, int64_t sector_num) } static int coroutine_fn -bochs_co_preadv(BlockDriverState *bs, uint64_t offset, uint64_t bytes, - QEMUIOVector *qiov, int flags) +bochs_co_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes, + QEMUIOVector *qiov, BdrvRequestFlags flags) { BDRVBochsState *s = bs->opaque; uint64_t sector_num = offset >> BDRV_SECTOR_BITS; diff --git a/block/cloop.c b/block/cloop.c index c99192a57f..b8c6d0eccd 100644 --- a/block/cloop.c +++ b/block/cloop.c @@ -245,8 +245,8 @@ static inline int cloop_read_block(BlockDriverState *bs, int block_num) } static int coroutine_fn -cloop_co_preadv(BlockDriverState *bs, uint64_t offset, uint64_t bytes, - QEMUIOVector *qiov, int flags) +cloop_co_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes, + QEMUIOVector *qiov, BdrvRequestFlags flags) { 
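/*
 * Only the prototype changes here, as in the other format drivers touched
 * by this series: offset and byte count widen to int64_t and the flags
 * argument takes the BdrvRequestFlags type; the body is untouched.
 */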
BDRVCloopState *s = bs->opaque; uint64_t sector_num = offset >> BDRV_SECTOR_BITS; diff --git a/block/commit.c b/block/commit.c index 42792b4556..10cc5ff451 100644 --- a/block/commit.c +++ b/block/commit.c @@ -207,7 +207,7 @@ static const BlockJobDriver commit_job_driver = { }; static int coroutine_fn bdrv_commit_top_preadv(BlockDriverState *bs, - uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags) + int64_t offset, int64_t bytes, QEMUIOVector *qiov, BdrvRequestFlags flags) { return bdrv_co_preadv(bs->backing, offset, bytes, qiov, flags); } diff --git a/block/copy-before-write.c b/block/copy-before-write.c index 2a5e57deca..c30a5ff8de 100644 --- a/block/copy-before-write.c +++ b/block/copy-before-write.c @@ -40,8 +40,8 @@ typedef struct BDRVCopyBeforeWriteState { } BDRVCopyBeforeWriteState; static coroutine_fn int cbw_co_preadv( - BlockDriverState *bs, uint64_t offset, uint64_t bytes, - QEMUIOVector *qiov, int flags) + BlockDriverState *bs, int64_t offset, int64_t bytes, + QEMUIOVector *qiov, BdrvRequestFlags flags) { return bdrv_co_preadv(bs->file, offset, bytes, qiov, flags); } @@ -64,7 +64,7 @@ static coroutine_fn int cbw_do_copy_before_write(BlockDriverState *bs, } static int coroutine_fn cbw_co_pdiscard(BlockDriverState *bs, - int64_t offset, int bytes) + int64_t offset, int64_t bytes) { int ret = cbw_do_copy_before_write(bs, offset, bytes, 0); if (ret < 0) { @@ -75,7 +75,7 @@ static int coroutine_fn cbw_co_pdiscard(BlockDriverState *bs, } static int coroutine_fn cbw_co_pwrite_zeroes(BlockDriverState *bs, - int64_t offset, int bytes, BdrvRequestFlags flags) + int64_t offset, int64_t bytes, BdrvRequestFlags flags) { int ret = cbw_do_copy_before_write(bs, offset, bytes, flags); if (ret < 0) { @@ -86,9 +86,10 @@ static int coroutine_fn cbw_co_pwrite_zeroes(BlockDriverState *bs, } static coroutine_fn int cbw_co_pwritev(BlockDriverState *bs, - uint64_t offset, - uint64_t bytes, - QEMUIOVector *qiov, int flags) + int64_t offset, + int64_t bytes, + QEMUIOVector *qiov, + BdrvRequestFlags flags) { int ret = cbw_do_copy_before_write(bs, offset, bytes, flags); if (ret < 0) { diff --git a/block/copy-on-read.c b/block/copy-on-read.c index c428682272..1fc7fb3333 100644 --- a/block/copy-on-read.c +++ b/block/copy-on-read.c @@ -128,10 +128,10 @@ static int64_t cor_getlength(BlockDriverState *bs) static int coroutine_fn cor_co_preadv_part(BlockDriverState *bs, - uint64_t offset, uint64_t bytes, + int64_t offset, int64_t bytes, QEMUIOVector *qiov, size_t qiov_offset, - int flags) + BdrvRequestFlags flags) { int64_t n; int local_flags; @@ -181,10 +181,11 @@ static int coroutine_fn cor_co_preadv_part(BlockDriverState *bs, static int coroutine_fn cor_co_pwritev_part(BlockDriverState *bs, - uint64_t offset, - uint64_t bytes, + int64_t offset, + int64_t bytes, QEMUIOVector *qiov, - size_t qiov_offset, int flags) + size_t qiov_offset, + BdrvRequestFlags flags) { return bdrv_co_pwritev_part(bs->file, offset, bytes, qiov, qiov_offset, flags); @@ -192,7 +193,7 @@ static int coroutine_fn cor_co_pwritev_part(BlockDriverState *bs, static int coroutine_fn cor_co_pwrite_zeroes(BlockDriverState *bs, - int64_t offset, int bytes, + int64_t offset, int64_t bytes, BdrvRequestFlags flags) { return bdrv_co_pwrite_zeroes(bs->file, offset, bytes, flags); @@ -200,15 +201,15 @@ static int coroutine_fn cor_co_pwrite_zeroes(BlockDriverState *bs, static int coroutine_fn cor_co_pdiscard(BlockDriverState *bs, - int64_t offset, int bytes) + int64_t offset, int64_t bytes) { return bdrv_co_pdiscard(bs->file, offset, bytes); 
} static int coroutine_fn cor_co_pwritev_compressed(BlockDriverState *bs, - uint64_t offset, - uint64_t bytes, + int64_t offset, + int64_t bytes, QEMUIOVector *qiov) { return bdrv_co_pwritev(bs->file, offset, bytes, qiov, diff --git a/block/coroutines.h b/block/coroutines.h index 514d169d23..c8c14a29c8 100644 --- a/block/coroutines.h +++ b/block/coroutines.h @@ -27,6 +27,9 @@ #include "block/block_int.h" +/* For blk_bs() in generated block/block-gen.c */ +#include "sysemu/block-backend.h" + int coroutine_fn bdrv_co_check(BlockDriverState *bs, BdrvCheckResult *res, BdrvCheckMode fix); int coroutine_fn bdrv_co_invalidate_cache(BlockDriverState *bs, Error **errp); @@ -72,4 +75,34 @@ int coroutine_fn nbd_co_do_establish_connection(BlockDriverState *bs, Error **errp); +int generated_co_wrapper +blk_do_preadv(BlockBackend *blk, int64_t offset, int64_t bytes, + QEMUIOVector *qiov, BdrvRequestFlags flags); +int coroutine_fn +blk_co_do_preadv(BlockBackend *blk, int64_t offset, int64_t bytes, + QEMUIOVector *qiov, BdrvRequestFlags flags); + + +int generated_co_wrapper +blk_do_pwritev_part(BlockBackend *blk, int64_t offset, int64_t bytes, + QEMUIOVector *qiov, size_t qiov_offset, + BdrvRequestFlags flags); +int coroutine_fn +blk_co_do_pwritev_part(BlockBackend *blk, int64_t offset, int64_t bytes, + QEMUIOVector *qiov, size_t qiov_offset, + BdrvRequestFlags flags); + +int generated_co_wrapper +blk_do_ioctl(BlockBackend *blk, unsigned long int req, void *buf); +int coroutine_fn +blk_co_do_ioctl(BlockBackend *blk, unsigned long int req, void *buf); + +int generated_co_wrapper +blk_do_pdiscard(BlockBackend *blk, int64_t offset, int64_t bytes); +int coroutine_fn +blk_co_do_pdiscard(BlockBackend *blk, int64_t offset, int64_t bytes); + +int generated_co_wrapper blk_do_flush(BlockBackend *blk); +int coroutine_fn blk_co_do_flush(BlockBackend *blk); + #endif /* BLOCK_COROUTINES_INT_H */ diff --git a/block/crypto.c b/block/crypto.c index 1d30fde38e..c8ba4681e2 100644 --- a/block/crypto.c +++ b/block/crypto.c @@ -397,8 +397,8 @@ static int block_crypto_reopen_prepare(BDRVReopenState *state, #define BLOCK_CRYPTO_MAX_IO_SIZE (1024 * 1024) static coroutine_fn int -block_crypto_co_preadv(BlockDriverState *bs, uint64_t offset, uint64_t bytes, - QEMUIOVector *qiov, int flags) +block_crypto_co_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes, + QEMUIOVector *qiov, BdrvRequestFlags flags) { BlockCrypto *crypto = bs->opaque; uint64_t cur_bytes; /* number of bytes in current iteration */ @@ -460,8 +460,8 @@ block_crypto_co_preadv(BlockDriverState *bs, uint64_t offset, uint64_t bytes, static coroutine_fn int -block_crypto_co_pwritev(BlockDriverState *bs, uint64_t offset, uint64_t bytes, - QEMUIOVector *qiov, int flags) +block_crypto_co_pwritev(BlockDriverState *bs, int64_t offset, int64_t bytes, + QEMUIOVector *qiov, BdrvRequestFlags flags) { BlockCrypto *crypto = bs->opaque; uint64_t cur_bytes; /* number of bytes in current iteration */ diff --git a/block/curl.c b/block/curl.c index 50e741a0d7..4a8ae2b269 100644 --- a/block/curl.c +++ b/block/curl.c @@ -896,7 +896,8 @@ out: } static int coroutine_fn curl_co_preadv(BlockDriverState *bs, - uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags) + int64_t offset, int64_t bytes, QEMUIOVector *qiov, + BdrvRequestFlags flags) { CURLAIOCB acb = { .co = qemu_coroutine_self(), diff --git a/block/dmg.c b/block/dmg.c index ef35a505f2..447901fbb8 100644 --- a/block/dmg.c +++ b/block/dmg.c @@ -689,8 +689,8 @@ static inline int dmg_read_chunk(BlockDriverState *bs, 
uint64_t sector_num) } static int coroutine_fn -dmg_co_preadv(BlockDriverState *bs, uint64_t offset, uint64_t bytes, - QEMUIOVector *qiov, int flags) +dmg_co_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes, + QEMUIOVector *qiov, BdrvRequestFlags flags) { BDRVDMGState *s = bs->opaque; uint64_t sector_num = offset >> BDRV_SECTOR_BITS; diff --git a/block/export/fuse.c b/block/export/fuse.c index 2e3bf8270b..823c126d23 100644 --- a/block/export/fuse.c +++ b/block/export/fuse.c @@ -31,6 +31,10 @@ #include #include +#if defined(CONFIG_FALLOCATE_ZERO_RANGE) +#include +#endif + #ifdef __linux__ #include #endif diff --git a/block/file-posix.c b/block/file-posix.c index d81e15efa4..7a27c83060 100644 --- a/block/file-posix.c +++ b/block/file-posix.c @@ -150,6 +150,8 @@ typedef struct BDRVRawState { uint64_t locked_perm; uint64_t locked_shared_perm; + uint64_t aio_max_batch; + int perm_change_fd; int perm_change_flags; BDRVReopenState *reopen_state; @@ -530,6 +532,11 @@ static QemuOptsList raw_runtime_opts = { .type = QEMU_OPT_STRING, .help = "host AIO implementation (threads, native, io_uring)", }, + { + .name = "aio-max-batch", + .type = QEMU_OPT_NUMBER, + .help = "AIO max batch size (0 = auto handled by AIO backend, default: 0)", + }, { .name = "locking", .type = QEMU_OPT_STRING, @@ -609,6 +616,8 @@ static int raw_open_common(BlockDriverState *bs, QDict *options, s->use_linux_io_uring = (aio == BLOCKDEV_AIO_OPTIONS_IO_URING); #endif + s->aio_max_batch = qemu_opt_get_number(opts, "aio-max-batch", 0); + locking = qapi_enum_parse(&OnOffAuto_lookup, qemu_opt_get(opts, "locking"), ON_OFF_AUTO_AUTO, &local_err); @@ -1273,7 +1282,7 @@ static void raw_refresh_limits(BlockDriverState *bs, Error **errp) ret = hdev_get_max_segments(s->fd, &st); if (ret > 0) { - bs->bl.max_iov = ret; + bs->bl.max_hw_iov = ret; } } } @@ -1807,7 +1816,7 @@ static int handle_aiocb_copy_range(void *opaque) static int handle_aiocb_discard(void *opaque) { RawPosixAIOData *aiocb = opaque; - int ret = -EOPNOTSUPP; + int ret = -ENOTSUP; BDRVRawState *s = aiocb->bs->opaque; if (!s->has_discard) { @@ -1829,7 +1838,7 @@ static int handle_aiocb_discard(void *opaque) #ifdef CONFIG_FALLOCATE_PUNCH_HOLE ret = do_fallocate(s->fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, aiocb->aio_offset, aiocb->aio_nbytes); - ret = translate_err(-errno); + ret = translate_err(ret); #elif defined(__APPLE__) && (__MACH__) fpunchhole_t fpunchhole; fpunchhole.fp_flags = 0; @@ -2057,7 +2066,8 @@ static int coroutine_fn raw_co_prw(BlockDriverState *bs, uint64_t offset, } else if (s->use_linux_aio) { LinuxAioState *aio = aio_get_linux_aio(bdrv_get_aio_context(bs)); assert(qiov->size == bytes); - return laio_co_submit(bs, aio, s->fd, offset, qiov, type); + return laio_co_submit(bs, aio, s->fd, offset, qiov, type, + s->aio_max_batch); #endif } @@ -2077,16 +2087,16 @@ static int coroutine_fn raw_co_prw(BlockDriverState *bs, uint64_t offset, return raw_thread_pool_submit(bs, handle_aiocb_rw, &acb); } -static int coroutine_fn raw_co_preadv(BlockDriverState *bs, uint64_t offset, - uint64_t bytes, QEMUIOVector *qiov, - int flags) +static int coroutine_fn raw_co_preadv(BlockDriverState *bs, int64_t offset, + int64_t bytes, QEMUIOVector *qiov, + BdrvRequestFlags flags) { return raw_co_prw(bs, offset, bytes, qiov, QEMU_AIO_READ); } -static int coroutine_fn raw_co_pwritev(BlockDriverState *bs, uint64_t offset, - uint64_t bytes, QEMUIOVector *qiov, - int flags) +static int coroutine_fn raw_co_pwritev(BlockDriverState *bs, int64_t offset, + int64_t bytes, 
QEMUIOVector *qiov, + BdrvRequestFlags flags) { assert(flags == 0); return raw_co_prw(bs, offset, bytes, qiov, QEMU_AIO_WRITE); @@ -2115,7 +2125,7 @@ static void raw_aio_unplug(BlockDriverState *bs) #ifdef CONFIG_LINUX_AIO if (s->use_linux_aio) { LinuxAioState *aio = aio_get_linux_aio(bdrv_get_aio_context(bs)); - laio_io_unplug(bs, aio); + laio_io_unplug(bs, aio, s->aio_max_batch); } #endif #ifdef CONFIG_LINUX_IO_URING @@ -2942,7 +2952,8 @@ static void raw_account_discard(BDRVRawState *s, uint64_t nbytes, int ret) } static coroutine_fn int -raw_do_pdiscard(BlockDriverState *bs, int64_t offset, int bytes, bool blkdev) +raw_do_pdiscard(BlockDriverState *bs, int64_t offset, int64_t bytes, + bool blkdev) { BDRVRawState *s = bs->opaque; RawPosixAIOData acb; @@ -2966,13 +2977,13 @@ raw_do_pdiscard(BlockDriverState *bs, int64_t offset, int bytes, bool blkdev) } static coroutine_fn int -raw_co_pdiscard(BlockDriverState *bs, int64_t offset, int bytes) +raw_co_pdiscard(BlockDriverState *bs, int64_t offset, int64_t bytes) { return raw_do_pdiscard(bs, offset, bytes, false); } static int coroutine_fn -raw_do_pwrite_zeroes(BlockDriverState *bs, int64_t offset, int bytes, +raw_do_pwrite_zeroes(BlockDriverState *bs, int64_t offset, int64_t bytes, BdrvRequestFlags flags, bool blkdev) { BDRVRawState *s = bs->opaque; @@ -3040,7 +3051,7 @@ raw_do_pwrite_zeroes(BlockDriverState *bs, int64_t offset, int bytes, static int coroutine_fn raw_co_pwrite_zeroes( BlockDriverState *bs, int64_t offset, - int bytes, BdrvRequestFlags flags) + int64_t bytes, BdrvRequestFlags flags) { return raw_do_pwrite_zeroes(bs, offset, bytes, flags, false); } @@ -3203,8 +3214,8 @@ static void raw_abort_perm_update(BlockDriverState *bs) } static int coroutine_fn raw_co_copy_range_from( - BlockDriverState *bs, BdrvChild *src, uint64_t src_offset, - BdrvChild *dst, uint64_t dst_offset, uint64_t bytes, + BlockDriverState *bs, BdrvChild *src, int64_t src_offset, + BdrvChild *dst, int64_t dst_offset, int64_t bytes, BdrvRequestFlags read_flags, BdrvRequestFlags write_flags) { return bdrv_co_copy_range_to(src, src_offset, dst, dst_offset, bytes, @@ -3213,10 +3224,10 @@ static int coroutine_fn raw_co_copy_range_from( static int coroutine_fn raw_co_copy_range_to(BlockDriverState *bs, BdrvChild *src, - uint64_t src_offset, + int64_t src_offset, BdrvChild *dst, - uint64_t dst_offset, - uint64_t bytes, + int64_t dst_offset, + int64_t bytes, BdrvRequestFlags read_flags, BdrvRequestFlags write_flags) { @@ -3591,7 +3602,7 @@ hdev_co_ioctl(BlockDriverState *bs, unsigned long int req, void *buf) #endif /* linux */ static coroutine_fn int -hdev_co_pdiscard(BlockDriverState *bs, int64_t offset, int bytes) +hdev_co_pdiscard(BlockDriverState *bs, int64_t offset, int64_t bytes) { BDRVRawState *s = bs->opaque; int ret; @@ -3605,7 +3616,7 @@ hdev_co_pdiscard(BlockDriverState *bs, int64_t offset, int bytes) } static coroutine_fn int hdev_co_pwrite_zeroes(BlockDriverState *bs, - int64_t offset, int bytes, BdrvRequestFlags flags) + int64_t offset, int64_t bytes, BdrvRequestFlags flags) { int rc; diff --git a/block/file-win32.c b/block/file-win32.c index b97c58d642..ec9d64d0e4 100644 --- a/block/file-win32.c +++ b/block/file-win32.c @@ -440,8 +440,8 @@ fail: } static BlockAIOCB *raw_aio_preadv(BlockDriverState *bs, - uint64_t offset, uint64_t bytes, - QEMUIOVector *qiov, int flags, + int64_t offset, int64_t bytes, + QEMUIOVector *qiov, BdrvRequestFlags flags, BlockCompletionFunc *cb, void *opaque) { BDRVRawState *s = bs->opaque; @@ -455,8 +455,8 @@ static 
BlockAIOCB *raw_aio_preadv(BlockDriverState *bs, } static BlockAIOCB *raw_aio_pwritev(BlockDriverState *bs, - uint64_t offset, uint64_t bytes, - QEMUIOVector *qiov, int flags, + int64_t offset, int64_t bytes, + QEMUIOVector *qiov, BdrvRequestFlags flags, BlockCompletionFunc *cb, void *opaque) { BDRVRawState *s = bs->opaque; diff --git a/block/filter-compress.c b/block/filter-compress.c index 5136371bf8..d5be538619 100644 --- a/block/filter-compress.c +++ b/block/filter-compress.c @@ -63,10 +63,10 @@ static int64_t compress_getlength(BlockDriverState *bs) static int coroutine_fn compress_co_preadv_part(BlockDriverState *bs, - uint64_t offset, uint64_t bytes, + int64_t offset, int64_t bytes, QEMUIOVector *qiov, size_t qiov_offset, - int flags) + BdrvRequestFlags flags) { return bdrv_co_preadv_part(bs->file, offset, bytes, qiov, qiov_offset, flags); @@ -74,10 +74,11 @@ static int coroutine_fn compress_co_preadv_part(BlockDriverState *bs, static int coroutine_fn compress_co_pwritev_part(BlockDriverState *bs, - uint64_t offset, - uint64_t bytes, + int64_t offset, + int64_t bytes, QEMUIOVector *qiov, - size_t qiov_offset, int flags) + size_t qiov_offset, + BdrvRequestFlags flags) { return bdrv_co_pwritev_part(bs->file, offset, bytes, qiov, qiov_offset, flags | BDRV_REQ_WRITE_COMPRESSED); @@ -85,7 +86,7 @@ static int coroutine_fn compress_co_pwritev_part(BlockDriverState *bs, static int coroutine_fn compress_co_pwrite_zeroes(BlockDriverState *bs, - int64_t offset, int bytes, + int64_t offset, int64_t bytes, BdrvRequestFlags flags) { return bdrv_co_pwrite_zeroes(bs->file, offset, bytes, flags); @@ -93,7 +94,7 @@ static int coroutine_fn compress_co_pwrite_zeroes(BlockDriverState *bs, static int coroutine_fn compress_co_pdiscard(BlockDriverState *bs, - int64_t offset, int bytes) + int64_t offset, int64_t bytes) { return bdrv_co_pdiscard(bs->file, offset, bytes); } diff --git a/block/gluster.c b/block/gluster.c index d51938e447..398976bc66 100644 --- a/block/gluster.c +++ b/block/gluster.c @@ -891,6 +891,7 @@ out: static void qemu_gluster_refresh_limits(BlockDriverState *bs, Error **errp) { bs->bl.max_transfer = GLUSTER_MAX_TRANSFER; + bs->bl.max_pdiscard = SIZE_MAX; } static int qemu_gluster_reopen_prepare(BDRVReopenState *state, @@ -1003,19 +1004,19 @@ static void qemu_gluster_reopen_abort(BDRVReopenState *state) #ifdef CONFIG_GLUSTERFS_ZEROFILL static coroutine_fn int qemu_gluster_co_pwrite_zeroes(BlockDriverState *bs, int64_t offset, - int size, + int64_t bytes, BdrvRequestFlags flags) { int ret; GlusterAIOCB acb; BDRVGlusterState *s = bs->opaque; - acb.size = size; + acb.size = bytes; acb.ret = 0; acb.coroutine = qemu_coroutine_self(); acb.aio_context = bdrv_get_aio_context(bs); - ret = glfs_zerofill_async(s->fd, offset, size, gluster_finish_aiocb, &acb); + ret = glfs_zerofill_async(s->fd, offset, bytes, gluster_finish_aiocb, &acb); if (ret < 0) { return -errno; } @@ -1297,18 +1298,20 @@ error: #ifdef CONFIG_GLUSTERFS_DISCARD static coroutine_fn int qemu_gluster_co_pdiscard(BlockDriverState *bs, - int64_t offset, int size) + int64_t offset, int64_t bytes) { int ret; GlusterAIOCB acb; BDRVGlusterState *s = bs->opaque; + assert(bytes <= SIZE_MAX); /* rely on max_pdiscard */ + acb.size = 0; acb.ret = 0; acb.coroutine = qemu_coroutine_self(); acb.aio_context = bdrv_get_aio_context(bs); - ret = glfs_discard_async(s->fd, offset, size, gluster_finish_aiocb, &acb); + ret = glfs_discard_async(s->fd, offset, bytes, gluster_finish_aiocb, &acb); if (ret < 0) { return -errno; } diff --git a/block/io.c 
b/block/io.c index 99ee182ca4..bb0a254def 100644 --- a/block/io.c +++ b/block/io.c @@ -136,6 +136,7 @@ static void bdrv_merge_limits(BlockLimits *dst, const BlockLimits *src) dst->min_mem_alignment = MAX(dst->min_mem_alignment, src->min_mem_alignment); dst->max_iov = MIN_NON_ZERO(dst->max_iov, src->max_iov); + dst->max_hw_iov = MIN_NON_ZERO(dst->max_hw_iov, src->max_hw_iov); } typedef struct BdrvRefreshLimitsState { @@ -956,9 +957,9 @@ bool coroutine_fn bdrv_make_request_serialising(BdrvTrackedRequest *req, return waited; } -static int bdrv_check_qiov_request(int64_t offset, int64_t bytes, - QEMUIOVector *qiov, size_t qiov_offset, - Error **errp) +int bdrv_check_qiov_request(int64_t offset, int64_t bytes, + QEMUIOVector *qiov, size_t qiov_offset, + Error **errp) { /* * Check generic offset/bytes correctness @@ -1230,7 +1231,8 @@ out: static int coroutine_fn bdrv_driver_pwritev(BlockDriverState *bs, int64_t offset, int64_t bytes, QEMUIOVector *qiov, - size_t qiov_offset, int flags) + size_t qiov_offset, + BdrvRequestFlags flags) { BlockDriver *drv = bs->drv; int64_t sector_num; @@ -1868,7 +1870,8 @@ static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs, int head = 0; int tail = 0; - int max_write_zeroes = MIN_NON_ZERO(bs->bl.max_pwrite_zeroes, INT_MAX); + int64_t max_write_zeroes = MIN_NON_ZERO(bs->bl.max_pwrite_zeroes, + INT64_MAX); int alignment = MAX(bs->bl.pwrite_zeroes_alignment, bs->bl.request_alignment); int max_transfer = MIN_NON_ZERO(bs->bl.max_transfer, MAX_BOUNCE_BUFFER); @@ -2073,7 +2076,8 @@ bdrv_co_write_req_finish(BdrvChild *child, int64_t offset, int64_t bytes, */ static int coroutine_fn bdrv_aligned_pwritev(BdrvChild *child, BdrvTrackedRequest *req, int64_t offset, int64_t bytes, - int64_t align, QEMUIOVector *qiov, size_t qiov_offset, int flags) + int64_t align, QEMUIOVector *qiov, size_t qiov_offset, + BdrvRequestFlags flags) { BlockDriverState *bs = child->bs; BlockDriver *drv = bs->drv; @@ -2246,7 +2250,11 @@ int coroutine_fn bdrv_co_pwritev_part(BdrvChild *child, return -ENOMEDIUM; } - ret = bdrv_check_request32(offset, bytes, qiov, qiov_offset); + if (flags & BDRV_REQ_ZERO_WRITE) { + ret = bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, NULL); + } else { + ret = bdrv_check_request32(offset, bytes, qiov, qiov_offset); + } if (ret < 0) { return ret; } @@ -2810,7 +2818,12 @@ bdrv_co_readv_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos) { BlockDriver *drv = bs->drv; BlockDriverState *child_bs = bdrv_primary_bs(bs); - int ret = -ENOTSUP; + int ret; + + ret = bdrv_check_qiov_request(pos, qiov->size, qiov, 0, NULL); + if (ret < 0) { + return ret; + } if (!drv) { return -ENOMEDIUM; @@ -2822,6 +2835,8 @@ bdrv_co_readv_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos) ret = drv->bdrv_load_vmstate(bs, qiov, pos); } else if (child_bs) { ret = bdrv_co_readv_vmstate(child_bs, qiov, pos); + } else { + ret = -ENOTSUP; } bdrv_dec_in_flight(bs); @@ -2834,7 +2849,12 @@ bdrv_co_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos) { BlockDriver *drv = bs->drv; BlockDriverState *child_bs = bdrv_primary_bs(bs); - int ret = -ENOTSUP; + int ret; + + ret = bdrv_check_qiov_request(pos, qiov->size, qiov, 0, NULL); + if (ret < 0) { + return ret; + } if (!drv) { return -ENOMEDIUM; @@ -2846,6 +2866,8 @@ bdrv_co_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos) ret = drv->bdrv_save_vmstate(bs, qiov, pos); } else if (child_bs) { ret = bdrv_co_writev_vmstate(child_bs, qiov, pos); + } else { + ret = -ENOTSUP; } 
bdrv_dec_in_flight(bs); @@ -3035,7 +3057,8 @@ int coroutine_fn bdrv_co_pdiscard(BdrvChild *child, int64_t offset, int64_t bytes) { BdrvTrackedRequest req; - int max_pdiscard, ret; + int ret; + int64_t max_pdiscard; int head, tail, align; BlockDriverState *bs = child->bs; @@ -3082,7 +3105,7 @@ int coroutine_fn bdrv_co_pdiscard(BdrvChild *child, int64_t offset, goto out; } - max_pdiscard = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_pdiscard, INT_MAX), + max_pdiscard = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_pdiscard, INT64_MAX), align); assert(max_pdiscard >= bs->bl.request_alignment); diff --git a/block/iscsi.c b/block/iscsi.c index 852384086b..57aa07a40d 100644 --- a/block/iscsi.c +++ b/block/iscsi.c @@ -427,14 +427,14 @@ static int64_t sector_qemu2lun(int64_t sector, IscsiLun *iscsilun) return sector * BDRV_SECTOR_SIZE / iscsilun->block_size; } -static bool is_byte_request_lun_aligned(int64_t offset, int count, +static bool is_byte_request_lun_aligned(int64_t offset, int64_t bytes, IscsiLun *iscsilun) { - if (offset % iscsilun->block_size || count % iscsilun->block_size) { + if (offset % iscsilun->block_size || bytes % iscsilun->block_size) { error_report("iSCSI misaligned request: " "iscsilun->block_size %u, offset %" PRIi64 - ", count %d", - iscsilun->block_size, offset, count); + ", bytes %" PRIi64, + iscsilun->block_size, offset, bytes); return false; } return true; @@ -1138,7 +1138,8 @@ iscsi_getlength(BlockDriverState *bs) } static int -coroutine_fn iscsi_co_pdiscard(BlockDriverState *bs, int64_t offset, int bytes) +coroutine_fn iscsi_co_pdiscard(BlockDriverState *bs, int64_t offset, + int64_t bytes) { IscsiLun *iscsilun = bs->opaque; struct IscsiTask iTask; @@ -1154,6 +1155,12 @@ coroutine_fn iscsi_co_pdiscard(BlockDriverState *bs, int64_t offset, int bytes) return 0; } + /* + * We don't want to overflow list.num which is uint32_t. + * We rely on our max_pdiscard. + */ + assert(bytes / iscsilun->block_size <= UINT32_MAX); + list.lba = offset / iscsilun->block_size; list.num = bytes / iscsilun->block_size; @@ -1202,12 +1209,12 @@ out_unlock: static int coroutine_fn iscsi_co_pwrite_zeroes(BlockDriverState *bs, int64_t offset, - int bytes, BdrvRequestFlags flags) + int64_t bytes, BdrvRequestFlags flags) { IscsiLun *iscsilun = bs->opaque; struct IscsiTask iTask; uint64_t lba; - uint32_t nb_blocks; + uint64_t nb_blocks; bool use_16_for_ws = iscsilun->use_16_for_rw; int r = 0; @@ -1247,11 +1254,21 @@ coroutine_fn iscsi_co_pwrite_zeroes(BlockDriverState *bs, int64_t offset, iscsi_co_init_iscsitask(iscsilun, &iTask); retry: if (use_16_for_ws) { + /* + * iscsi_writesame16_task num_blocks argument is uint32_t. We rely here + * on our max_pwrite_zeroes limit. + */ + assert(nb_blocks <= UINT32_MAX); iTask.task = iscsi_writesame16_task(iscsilun->iscsi, iscsilun->lun, lba, iscsilun->zeroblock, iscsilun->block_size, nb_blocks, 0, !!(flags & BDRV_REQ_MAY_UNMAP), 0, 0, iscsi_co_generic_cb, &iTask); } else { + /* + * iscsi_writesame10_task num_blocks argument is uint16_t. We rely here + * on our max_pwrite_zeroes limit. 
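bdrv_co_pdiscard above now computes its per-iteration maximum as QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_pdiscard, INT64_MAX), align), i.e. "no reported limit" falls back to the full signed 64-bit range and the result is then rounded down to the request alignment. A self-contained sketch of that clamping, with local stand-ins for QEMU's MIN_NON_ZERO and QEMU_ALIGN_DOWN macros:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Local stand-ins for the QEMU macros used in block/io.c. */
#define MIN_NON_ZERO(a, b) ((a) == 0 ? (b) : ((b) == 0 ? (a) : ((a) < (b) ? (a) : (b))))
#define ALIGN_DOWN(n, m)   ((n) / (m) * (m))

static int64_t max_pdiscard_for(int64_t driver_limit, int64_t align)
{
    /* 0 means "no limit reported", so fall back to INT64_MAX, then align. */
    return ALIGN_DOWN(MIN_NON_ZERO(driver_limit, INT64_MAX), align);
}

int main(void)
{
    assert(max_pdiscard_for(0, 512) == INT64_MAX / 512 * 512);
    assert(max_pdiscard_for(1 << 20, 4096) == 1 << 20);
    assert(max_pdiscard_for((1 << 20) + 100, 4096) == 1 << 20);
    printf("limit clamping behaves as expected\n");
    return 0;
}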
+ */ + assert(nb_blocks <= UINT16_MAX); iTask.task = iscsi_writesame10_task(iscsilun->iscsi, iscsilun->lun, lba, iscsilun->zeroblock, iscsilun->block_size, nb_blocks, 0, !!(flags & BDRV_REQ_MAY_UNMAP), @@ -2061,20 +2078,19 @@ static void iscsi_refresh_limits(BlockDriverState *bs, Error **errp) } if (iscsilun->lbp.lbpu) { - if (iscsilun->bl.max_unmap < 0xffffffff / block_size) { - bs->bl.max_pdiscard = - iscsilun->bl.max_unmap * iscsilun->block_size; - } + bs->bl.max_pdiscard = + MIN_NON_ZERO(iscsilun->bl.max_unmap * iscsilun->block_size, + (uint64_t)UINT32_MAX * iscsilun->block_size); bs->bl.pdiscard_alignment = iscsilun->bl.opt_unmap_gran * iscsilun->block_size; } else { bs->bl.pdiscard_alignment = iscsilun->block_size; } - if (iscsilun->bl.max_ws_len < 0xffffffff / block_size) { - bs->bl.max_pwrite_zeroes = - iscsilun->bl.max_ws_len * iscsilun->block_size; - } + bs->bl.max_pwrite_zeroes = + MIN_NON_ZERO(iscsilun->bl.max_ws_len * iscsilun->block_size, + max_xfer_len * iscsilun->block_size); + if (iscsilun->lbp.lbpws) { bs->bl.pwrite_zeroes_alignment = iscsilun->bl.opt_unmap_gran * iscsilun->block_size; @@ -2169,10 +2185,10 @@ static void coroutine_fn iscsi_co_invalidate_cache(BlockDriverState *bs, static int coroutine_fn iscsi_co_copy_range_from(BlockDriverState *bs, BdrvChild *src, - uint64_t src_offset, + int64_t src_offset, BdrvChild *dst, - uint64_t dst_offset, - uint64_t bytes, + int64_t dst_offset, + int64_t bytes, BdrvRequestFlags read_flags, BdrvRequestFlags write_flags) { @@ -2310,10 +2326,10 @@ static void iscsi_xcopy_data(struct iscsi_data *data, static int coroutine_fn iscsi_co_copy_range_to(BlockDriverState *bs, BdrvChild *src, - uint64_t src_offset, + int64_t src_offset, BdrvChild *dst, - uint64_t dst_offset, - uint64_t bytes, + int64_t dst_offset, + int64_t bytes, BdrvRequestFlags read_flags, BdrvRequestFlags write_flags) { diff --git a/block/linux-aio.c b/block/linux-aio.c index 0dab507b71..f53ae72e21 100644 --- a/block/linux-aio.c +++ b/block/linux-aio.c @@ -334,30 +334,45 @@ static void ioq_submit(LinuxAioState *s) } } +static uint64_t laio_max_batch(LinuxAioState *s, uint64_t dev_max_batch) +{ + uint64_t max_batch = s->aio_context->aio_max_batch ?: DEFAULT_MAX_BATCH; + + /* + * AIO context can be shared between multiple block devices, so + * `dev_max_batch` allows reducing the batch size for latency-sensitive + * devices. 
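The iscsi_refresh_limits change expresses the device's UNMAP and WRITE SAME block-count fields as byte limits, so the assertions added above (bytes / block_size <= UINT32_MAX, nb_blocks <= UINT16_MAX for WRITE SAME(10)) hold by construction. A small sketch of that derivation; the block size and device limit below are illustrative values, not the driver's structures:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define MIN_NON_ZERO(a, b) ((a) == 0 ? (b) : ((b) == 0 ? (a) : ((a) < (b) ? (a) : (b))))

int main(void)
{
    uint32_t block_size   = 512;        /* logical block size of the LUN */
    uint64_t bl_max_unmap = 0xffffffff; /* device-reported maximum unmap block count */

    /* Cap the byte limit so that bytes / block_size always fits in uint32_t. */
    uint64_t max_pdiscard = MIN_NON_ZERO(bl_max_unmap * (uint64_t)block_size,
                                         (uint64_t)UINT32_MAX * block_size);

    uint64_t bytes = max_pdiscard;      /* largest request the limit allows */
    assert(bytes / block_size <= UINT32_MAX);

    printf("max_pdiscard = %llu bytes\n", (unsigned long long)max_pdiscard);
    return 0;
}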
+ */ + max_batch = MIN_NON_ZERO(dev_max_batch, max_batch); + + /* limit the batch with the number of available events */ + max_batch = MIN_NON_ZERO(MAX_EVENTS - s->io_q.in_flight, max_batch); + + return max_batch; +} + void laio_io_plug(BlockDriverState *bs, LinuxAioState *s) { s->io_q.plugged++; } -void laio_io_unplug(BlockDriverState *bs, LinuxAioState *s) +void laio_io_unplug(BlockDriverState *bs, LinuxAioState *s, + uint64_t dev_max_batch) { assert(s->io_q.plugged); - if (--s->io_q.plugged == 0 && - !s->io_q.blocked && !QSIMPLEQ_EMPTY(&s->io_q.pending)) { + if (s->io_q.in_queue >= laio_max_batch(s, dev_max_batch) || + (--s->io_q.plugged == 0 && + !s->io_q.blocked && !QSIMPLEQ_EMPTY(&s->io_q.pending))) { ioq_submit(s); } } static int laio_do_submit(int fd, struct qemu_laiocb *laiocb, off_t offset, - int type) + int type, uint64_t dev_max_batch) { LinuxAioState *s = laiocb->ctx; struct iocb *iocbs = &laiocb->iocb; QEMUIOVector *qiov = laiocb->qiov; - int64_t max_batch = s->aio_context->aio_max_batch ?: DEFAULT_MAX_BATCH; - - /* limit the batch with the number of available events */ - max_batch = MIN_NON_ZERO(MAX_EVENTS - s->io_q.in_flight, max_batch); switch (type) { case QEMU_AIO_WRITE: @@ -378,7 +393,7 @@ static int laio_do_submit(int fd, struct qemu_laiocb *laiocb, off_t offset, s->io_q.in_queue++; if (!s->io_q.blocked && (!s->io_q.plugged || - s->io_q.in_queue >= max_batch)) { + s->io_q.in_queue >= laio_max_batch(s, dev_max_batch))) { ioq_submit(s); } @@ -386,7 +401,8 @@ static int laio_do_submit(int fd, struct qemu_laiocb *laiocb, off_t offset, } int coroutine_fn laio_co_submit(BlockDriverState *bs, LinuxAioState *s, int fd, - uint64_t offset, QEMUIOVector *qiov, int type) + uint64_t offset, QEMUIOVector *qiov, int type, + uint64_t dev_max_batch) { int ret; struct qemu_laiocb laiocb = { @@ -398,7 +414,7 @@ int coroutine_fn laio_co_submit(BlockDriverState *bs, LinuxAioState *s, int fd, .qiov = qiov, }; - ret = laio_do_submit(fd, &laiocb, offset, type); + ret = laio_do_submit(fd, &laiocb, offset, type, dev_max_batch); if (ret < 0) { return ret; } diff --git a/block/meson.build b/block/meson.build index 66ee11e62c..deb73ca389 100644 --- a/block/meson.build +++ b/block/meson.build @@ -65,7 +65,7 @@ block_ss.add(when: 'CONFIG_POSIX', if_true: [files('file-posix.c'), coref, iokit block_ss.add(when: libiscsi, if_true: files('iscsi-opts.c')) block_ss.add(when: 'CONFIG_LINUX', if_true: files('nvme.c')) block_ss.add(when: 'CONFIG_REPLICATION', if_true: files('replication.c')) -block_ss.add(when: ['CONFIG_LINUX_AIO', libaio], if_true: files('linux-aio.c')) +block_ss.add(when: libaio, if_true: files('linux-aio.c')) block_ss.add(when: linux_io_uring, if_true: files('io_uring.c')) block_modules = {} diff --git a/block/mirror.c b/block/mirror.c index 85b781bc21..efec2c7674 100644 --- a/block/mirror.c +++ b/block/mirror.c @@ -56,7 +56,6 @@ typedef struct MirrorBlockJob { bool zero_target; MirrorCopyMode copy_mode; BlockdevOnError on_source_error, on_target_error; - bool synced; /* Set when the target is synced (dirty bitmap is clean, nothing * in flight) and the job is running in active mode */ bool actively_synced; @@ -121,7 +120,6 @@ typedef enum MirrorMethod { static BlockErrorAction mirror_error_action(MirrorBlockJob *s, bool read, int error) { - s->synced = false; s->actively_synced = false; if (read) { return block_job_error_action(&s->common, s->on_source_error, @@ -944,12 +942,10 @@ static int coroutine_fn mirror_run(Job *job, Error **errp) if (s->bdev_length == 0) { /* Transition to the 
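The new laio_max_batch() reduces to two clamps: honour the smaller of the per-device and per-AioContext batch limits (treating 0 as "unset"), and never queue more requests than there are free completion slots. A standalone sketch of that computation; the MAX_EVENTS and DEFAULT_MAX_BATCH values here are illustrative constants, not necessarily QEMU's:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define MIN_NON_ZERO(a, b) ((a) == 0 ? (b) : ((b) == 0 ? (a) : ((a) < (b) ? (a) : (b))))

enum { MAX_EVENTS = 128, DEFAULT_MAX_BATCH = 32 };   /* illustrative values */

static uint64_t laio_max_batch_sketch(uint64_t ctx_max_batch,
                                      uint64_t dev_max_batch,
                                      unsigned in_flight)
{
    /* Per-context limit, falling back to a built-in default when unset. */
    uint64_t max_batch = ctx_max_batch ? ctx_max_batch : DEFAULT_MAX_BATCH;

    /* A latency-sensitive device may request an even smaller batch. */
    max_batch = MIN_NON_ZERO(dev_max_batch, max_batch);

    /* Never batch more requests than there are free completion slots. */
    max_batch = MIN_NON_ZERO(MAX_EVENTS - in_flight, max_batch);

    return max_batch;
}

int main(void)
{
    assert(laio_max_batch_sketch(0, 0, 0) == DEFAULT_MAX_BATCH);
    assert(laio_max_batch_sketch(0, 8, 0) == 8);
    assert(laio_max_batch_sketch(64, 0, 120) == 8);   /* only 8 slots left */
    printf("batch limits behave as expected\n");
    return 0;
}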
READY state and wait for complete. */ job_transition_to_ready(&s->common.job); - s->synced = true; s->actively_synced = true; - while (!job_is_cancelled(&s->common.job) && !s->should_complete) { + while (!job_cancel_requested(&s->common.job) && !s->should_complete) { job_yield(&s->common.job); } - s->common.job.cancelled = false; goto immediate_exit; } @@ -1010,6 +1006,11 @@ static int coroutine_fn mirror_run(Job *job, Error **errp) job_pause_point(&s->common.job); + if (job_is_cancelled(&s->common.job)) { + ret = 0; + goto immediate_exit; + } + cnt = bdrv_get_dirty_count(s->dirty_bitmap); /* cnt is the number of dirty bytes remaining and s->bytes_in_flight is * the number of bytes currently being processed; together those are @@ -1036,7 +1037,7 @@ static int coroutine_fn mirror_run(Job *job, Error **errp) should_complete = false; if (s->in_flight == 0 && cnt == 0) { trace_mirror_before_flush(s); - if (!s->synced) { + if (!job_is_ready(&s->common.job)) { if (mirror_flush(s) < 0) { /* Go check s->ret. */ continue; @@ -1047,14 +1048,13 @@ static int coroutine_fn mirror_run(Job *job, Error **errp) * the target in a consistent state. */ job_transition_to_ready(&s->common.job); - s->synced = true; if (s->copy_mode != MIRROR_COPY_MODE_BACKGROUND) { s->actively_synced = true; } } should_complete = s->should_complete || - job_is_cancelled(&s->common.job); + job_cancel_requested(&s->common.job); cnt = bdrv_get_dirty_count(s->dirty_bitmap); } @@ -1084,24 +1084,17 @@ static int coroutine_fn mirror_run(Job *job, Error **errp) * completion. */ assert(QLIST_EMPTY(&bs->tracked_requests)); - s->common.job.cancelled = false; need_drain = false; break; } - ret = 0; - - if (s->synced && !should_complete) { + if (job_is_ready(&s->common.job) && !should_complete) { delay_ns = (s->in_flight == 0 && cnt == 0 ? BLOCK_JOB_SLICE_TIME : 0); } - trace_mirror_before_sleep(s, cnt, s->synced, delay_ns); + trace_mirror_before_sleep(s, cnt, job_is_ready(&s->common.job), + delay_ns); job_sleep_ns(&s->common.job, delay_ns); - if (job_is_cancelled(&s->common.job) && - (!s->synced || s->common.job.force_cancel)) - { - break; - } s->last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME); } @@ -1111,8 +1104,7 @@ immediate_exit: * or it was cancelled prematurely so that we do not guarantee that * the target is a copy of the source. */ - assert(ret < 0 || ((s->common.job.force_cancel || !s->synced) && - job_is_cancelled(&s->common.job))); + assert(ret < 0 || job_is_cancelled(&s->common.job)); assert(need_drain); mirror_wait_for_all_io(s); } @@ -1135,7 +1127,7 @@ static void mirror_complete(Job *job, Error **errp) { MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job); - if (!s->synced) { + if (!job_is_ready(job)) { error_setg(errp, "The active block job '%s' cannot be completed", job->id); return; @@ -1190,21 +1182,34 @@ static bool mirror_drained_poll(BlockJob *job) * from one of our own drain sections, to avoid a deadlock waiting for * ourselves. */ - if (!s->common.job.paused && !s->common.job.cancelled && !s->in_drain) { + if (!s->common.job.paused && !job_is_cancelled(&job->job) && !s->in_drain) { return true; } return !!s->in_flight; } -static void mirror_cancel(Job *job, bool force) +static bool mirror_cancel(Job *job, bool force) { MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job); BlockDriverState *target = blk_bs(s->target); - if (force || !job_is_ready(job)) { + /* + * Before the job is READY, we treat any cancellation like a + * force-cancellation. 
+ */ + force = force || !job_is_ready(job); + + if (force) { bdrv_cancel_in_flight(target); } + return force; +} + +static bool commit_active_cancel(Job *job, bool force) +{ + /* Same as above in mirror_cancel() */ + return force || !job_is_ready(job); } static const BlockJobDriver mirror_job_driver = { @@ -1234,6 +1239,7 @@ static const BlockJobDriver commit_active_job_driver = { .abort = mirror_abort, .pause = mirror_pause, .complete = mirror_complete, + .cancel = commit_active_cancel, }, .drained_poll = mirror_drained_poll, }; @@ -1402,7 +1408,7 @@ static void coroutine_fn active_write_settle(MirrorOp *op) } static int coroutine_fn bdrv_mirror_top_preadv(BlockDriverState *bs, - uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags) + int64_t offset, int64_t bytes, QEMUIOVector *qiov, BdrvRequestFlags flags) { return bdrv_co_preadv(bs->backing, offset, bytes, qiov, flags); } @@ -1417,6 +1423,7 @@ static int coroutine_fn bdrv_mirror_top_do_write(BlockDriverState *bs, bool copy_to_target; copy_to_target = s->job->ret >= 0 && + !job_is_cancelled(&s->job->common.job) && s->job->copy_mode == MIRROR_COPY_MODE_WRITE_BLOCKING; if (copy_to_target) { @@ -1456,7 +1463,7 @@ out: } static int coroutine_fn bdrv_mirror_top_pwritev(BlockDriverState *bs, - uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags) + int64_t offset, int64_t bytes, QEMUIOVector *qiov, BdrvRequestFlags flags) { MirrorBDSOpaque *s = bs->opaque; QEMUIOVector bounce_qiov; @@ -1465,6 +1472,7 @@ static int coroutine_fn bdrv_mirror_top_pwritev(BlockDriverState *bs, bool copy_to_target; copy_to_target = s->job->ret >= 0 && + !job_is_cancelled(&s->job->common.job) && s->job->copy_mode == MIRROR_COPY_MODE_WRITE_BLOCKING; if (copy_to_target) { @@ -1501,14 +1509,14 @@ static int coroutine_fn bdrv_mirror_top_flush(BlockDriverState *bs) } static int coroutine_fn bdrv_mirror_top_pwrite_zeroes(BlockDriverState *bs, - int64_t offset, int bytes, BdrvRequestFlags flags) + int64_t offset, int64_t bytes, BdrvRequestFlags flags) { return bdrv_mirror_top_do_write(bs, MIRROR_METHOD_ZERO, offset, bytes, NULL, flags); } static int coroutine_fn bdrv_mirror_top_pdiscard(BlockDriverState *bs, - int64_t offset, int bytes) + int64_t offset, int64_t bytes) { return bdrv_mirror_top_do_write(bs, MIRROR_METHOD_DISCARD, offset, bytes, NULL, 0); diff --git a/block/nbd.c b/block/nbd.c index f6ff1c4fb4..5ef462db1b 100644 --- a/block/nbd.c +++ b/block/nbd.c @@ -57,7 +57,8 @@ typedef struct { Coroutine *coroutine; uint64_t offset; /* original offset of the request */ - bool receiving; /* waiting for connection_co? 
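The mirror_cancel()/commit_active_cancel() change makes the policy explicit: before the job reaches READY, every cancellation is upgraded to a force-cancellation, and the callback now reports which kind was applied. A toy version of just that decision; the ready flag stands in for job_is_ready() and is not the real Job API:

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the job state consulted by the .cancel callback. */
struct toy_job {
    bool ready;   /* has the job transitioned to READY? */
};

static bool toy_mirror_cancel(struct toy_job *job, bool force)
{
    /* Before READY, any cancellation is treated as a force-cancellation. */
    return force || !job->ready;
}

int main(void)
{
    struct toy_job syncing = { .ready = false };
    struct toy_job ready   = { .ready = true };

    assert(toy_mirror_cancel(&syncing, false) == true);  /* upgraded */
    assert(toy_mirror_cancel(&ready, false) == false);   /* soft cancel stays soft */
    assert(toy_mirror_cancel(&ready, true) == true);
    printf("cancel decisions behave as expected\n");
    return 0;
}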
*/ + bool receiving; /* sleeping in the yield in nbd_receive_replies */ + bool reply_possible; /* reply header not yet received */ } NBDClientRequest; typedef enum NBDClientState { @@ -73,14 +74,10 @@ typedef struct BDRVNBDState { CoMutex send_mutex; CoQueue free_sema; - Coroutine *connection_co; - Coroutine *teardown_co; - QemuCoSleep reconnect_sleep; - bool drained; - bool wait_drained_end; + + CoMutex receive_mutex; int in_flight; NBDClientState state; - bool wait_in_flight; QEMUTimer *reconnect_delay_timer; @@ -127,33 +124,44 @@ static bool nbd_client_connected(BDRVNBDState *s) return qatomic_load_acquire(&s->state) == NBD_CLIENT_CONNECTED; } +static bool nbd_recv_coroutine_wake_one(NBDClientRequest *req) +{ + if (req->receiving) { + req->receiving = false; + aio_co_wake(req->coroutine); + return true; + } + + return false; +} + +static void nbd_recv_coroutines_wake(BDRVNBDState *s, bool all) +{ + int i; + + for (i = 0; i < MAX_NBD_REQUESTS; i++) { + if (nbd_recv_coroutine_wake_one(&s->requests[i]) && !all) { + return; + } + } +} + static void nbd_channel_error(BDRVNBDState *s, int ret) { + if (nbd_client_connected(s)) { + qio_channel_shutdown(s->ioc, QIO_CHANNEL_SHUTDOWN_BOTH, NULL); + } + if (ret == -EIO) { if (nbd_client_connected(s)) { s->state = s->reconnect_delay ? NBD_CLIENT_CONNECTING_WAIT : NBD_CLIENT_CONNECTING_NOWAIT; } } else { - if (nbd_client_connected(s)) { - qio_channel_shutdown(s->ioc, QIO_CHANNEL_SHUTDOWN_BOTH, NULL); - } s->state = NBD_CLIENT_QUIT; } -} -static void nbd_recv_coroutines_wake_all(BDRVNBDState *s) -{ - int i; - - for (i = 0; i < MAX_NBD_REQUESTS; i++) { - NBDClientRequest *req = &s->requests[i]; - - if (req->coroutine && req->receiving) { - req->receiving = false; - aio_co_wake(req->coroutine); - } - } + nbd_recv_coroutines_wake(s, true); } static void reconnect_delay_timer_del(BDRVNBDState *s) @@ -170,6 +178,7 @@ static void reconnect_delay_timer_cb(void *opaque) if (qatomic_load_acquire(&s->state) == NBD_CLIENT_CONNECTING_WAIT) { s->state = NBD_CLIENT_CONNECTING_NOWAIT; + nbd_co_establish_connection_cancel(s->conn); while (qemu_co_enter_next(&s->free_sema, NULL)) { /* Resume all queued requests */ } @@ -192,113 +201,21 @@ static void reconnect_delay_timer_init(BDRVNBDState *s, uint64_t expire_time_ns) timer_mod(s->reconnect_delay_timer, expire_time_ns); } -static void nbd_client_detach_aio_context(BlockDriverState *bs) -{ - BDRVNBDState *s = (BDRVNBDState *)bs->opaque; - - /* Timer is deleted in nbd_client_co_drain_begin() */ - assert(!s->reconnect_delay_timer); - /* - * If reconnect is in progress we may have no ->ioc. It will be - * re-instantiated in the proper aio context once the connection is - * reestablished. - */ - if (s->ioc) { - qio_channel_detach_aio_context(QIO_CHANNEL(s->ioc)); - } -} - -static void nbd_client_attach_aio_context_bh(void *opaque) -{ - BlockDriverState *bs = opaque; - BDRVNBDState *s = (BDRVNBDState *)bs->opaque; - - if (s->connection_co) { - /* - * The node is still drained, so we know the coroutine has yielded in - * nbd_read_eof(), the only place where bs->in_flight can reach 0, or - * it is entered for the first time. Both places are safe for entering - * the coroutine. 
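nbd_recv_coroutine_wake_one() and nbd_recv_coroutines_wake() above replace the single connection_co with per-request wakeups: a channel error wakes every sleeping requester, while a completed reply only needs to wake one. A simplified model of that selection over an array of request slots; aio_co_wake is replaced by a counter stub so the sketch stays self-contained:

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

#define MAX_REQUESTS 4

struct toy_request {
    bool receiving;   /* sleeping, waiting for its reply header */
    int woken;        /* how many times the stubbed wake fired */
};

static bool wake_one(struct toy_request *req)
{
    if (req->receiving) {
        req->receiving = false;
        req->woken++;          /* stand-in for aio_co_wake(req->coroutine) */
        return true;
    }
    return false;
}

static void wake(struct toy_request *reqs, bool all)
{
    for (int i = 0; i < MAX_REQUESTS; i++) {
        if (wake_one(&reqs[i]) && !all) {
            return;            /* one sleeper is enough for a normal reply */
        }
    }
}

int main(void)
{
    struct toy_request reqs[MAX_REQUESTS] = {
        { .receiving = true }, { .receiving = false },
        { .receiving = true }, { .receiving = true },
    };

    wake(reqs, false);                      /* reply arrived: wake exactly one */
    assert(reqs[0].woken == 1 && reqs[2].woken == 0);

    wake(reqs, true);                       /* channel error: wake the rest */
    assert(reqs[2].woken == 1 && reqs[3].woken == 1);
    printf("wakeups behave as expected\n");
    return 0;
}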
- */ - qemu_aio_coroutine_enter(bs->aio_context, s->connection_co); - } - bdrv_dec_in_flight(bs); -} - -static void nbd_client_attach_aio_context(BlockDriverState *bs, - AioContext *new_context) -{ - BDRVNBDState *s = (BDRVNBDState *)bs->opaque; - - /* - * s->connection_co is either yielded from nbd_receive_reply or from - * nbd_co_reconnect_loop() - */ - if (nbd_client_connected(s)) { - qio_channel_attach_aio_context(QIO_CHANNEL(s->ioc), new_context); - } - - bdrv_inc_in_flight(bs); - - /* - * Need to wait here for the BH to run because the BH must run while the - * node is still drained. - */ - aio_wait_bh_oneshot(new_context, nbd_client_attach_aio_context_bh, bs); -} - -static void coroutine_fn nbd_client_co_drain_begin(BlockDriverState *bs) -{ - BDRVNBDState *s = (BDRVNBDState *)bs->opaque; - - s->drained = true; - qemu_co_sleep_wake(&s->reconnect_sleep); - - nbd_co_establish_connection_cancel(s->conn); - - reconnect_delay_timer_del(s); - - if (qatomic_load_acquire(&s->state) == NBD_CLIENT_CONNECTING_WAIT) { - s->state = NBD_CLIENT_CONNECTING_NOWAIT; - qemu_co_queue_restart_all(&s->free_sema); - } -} - -static void coroutine_fn nbd_client_co_drain_end(BlockDriverState *bs) -{ - BDRVNBDState *s = (BDRVNBDState *)bs->opaque; - - s->drained = false; - if (s->wait_drained_end) { - s->wait_drained_end = false; - aio_co_wake(s->connection_co); - } -} - - static void nbd_teardown_connection(BlockDriverState *bs) { BDRVNBDState *s = (BDRVNBDState *)bs->opaque; + assert(!s->in_flight); + if (s->ioc) { - /* finish any pending coroutines */ qio_channel_shutdown(s->ioc, QIO_CHANNEL_SHUTDOWN_BOTH, NULL); + yank_unregister_function(BLOCKDEV_YANK_INSTANCE(s->bs->node_name), + nbd_yank, s->bs); + object_unref(OBJECT(s->ioc)); + s->ioc = NULL; } s->state = NBD_CLIENT_QUIT; - if (s->connection_co) { - qemu_co_sleep_wake(&s->reconnect_sleep); - nbd_co_establish_connection_cancel(s->conn); - } - if (qemu_in_coroutine()) { - s->teardown_co = qemu_coroutine_self(); - /* connection_co resumes us when it terminates */ - qemu_coroutine_yield(); - s->teardown_co = NULL; - } else { - BDRV_POLL_WHILE(bs, s->connection_co); - } - assert(!s->connection_co); } static bool nbd_client_connecting(BDRVNBDState *s) @@ -363,10 +280,11 @@ int coroutine_fn nbd_co_do_establish_connection(BlockDriverState *bs, { BDRVNBDState *s = (BDRVNBDState *)bs->opaque; int ret; + bool blocking = nbd_client_connecting_wait(s); assert(!s->ioc); - s->ioc = nbd_co_establish_connection(s->conn, &s->info, true, errp); + s->ioc = nbd_co_establish_connection(s->conn, &s->info, blocking, errp); if (!s->ioc) { return -ECONNREFUSED; } @@ -402,29 +320,22 @@ int coroutine_fn nbd_co_do_establish_connection(BlockDriverState *bs, return 0; } +/* called under s->send_mutex */ static coroutine_fn void nbd_reconnect_attempt(BDRVNBDState *s) { - if (!nbd_client_connecting(s)) { - return; - } + assert(nbd_client_connecting(s)); + assert(s->in_flight == 0); - /* Wait for completion of all in-flight requests */ - - qemu_co_mutex_lock(&s->send_mutex); - - while (s->in_flight > 0) { - qemu_co_mutex_unlock(&s->send_mutex); - nbd_recv_coroutines_wake_all(s); - s->wait_in_flight = true; - qemu_coroutine_yield(); - s->wait_in_flight = false; - qemu_co_mutex_lock(&s->send_mutex); - } - - qemu_co_mutex_unlock(&s->send_mutex); - - if (!nbd_client_connecting(s)) { - return; + if (nbd_client_connecting_wait(s) && s->reconnect_delay && + !s->reconnect_delay_timer) + { + /* + * It's first reconnect attempt after switching to + * NBD_CLIENT_CONNECTING_WAIT + */ + 
reconnect_delay_timer_init(s, + qemu_clock_get_ns(QEMU_CLOCK_REALTIME) + + s->reconnect_delay * NANOSECONDS_PER_SECOND); } /* @@ -444,135 +355,73 @@ static coroutine_fn void nbd_reconnect_attempt(BDRVNBDState *s) nbd_co_do_establish_connection(s->bs, NULL); } -static coroutine_fn void nbd_co_reconnect_loop(BDRVNBDState *s) +static coroutine_fn int nbd_receive_replies(BDRVNBDState *s, uint64_t handle) { - uint64_t timeout = 1 * NANOSECONDS_PER_SECOND; - uint64_t max_timeout = 16 * NANOSECONDS_PER_SECOND; + int ret; + uint64_t ind = HANDLE_TO_INDEX(s, handle), ind2; + QEMU_LOCK_GUARD(&s->receive_mutex); - if (qatomic_load_acquire(&s->state) == NBD_CLIENT_CONNECTING_WAIT) { - reconnect_delay_timer_init(s, qemu_clock_get_ns(QEMU_CLOCK_REALTIME) + - s->reconnect_delay * NANOSECONDS_PER_SECOND); - } - - nbd_reconnect_attempt(s); - - while (nbd_client_connecting(s)) { - if (s->drained) { - bdrv_dec_in_flight(s->bs); - s->wait_drained_end = true; - while (s->drained) { - /* - * We may be entered once from nbd_client_attach_aio_context_bh - * and then from nbd_client_co_drain_end. So here is a loop. - */ - qemu_coroutine_yield(); - } - bdrv_inc_in_flight(s->bs); - } else { - qemu_co_sleep_ns_wakeable(&s->reconnect_sleep, - QEMU_CLOCK_REALTIME, timeout); - if (s->drained) { - continue; - } - if (timeout < max_timeout) { - timeout *= 2; - } - } - - nbd_reconnect_attempt(s); - } - - reconnect_delay_timer_del(s); -} - -static coroutine_fn void nbd_connection_entry(void *opaque) -{ - BDRVNBDState *s = opaque; - uint64_t i; - int ret = 0; - Error *local_err = NULL; - - while (qatomic_load_acquire(&s->state) != NBD_CLIENT_QUIT) { - /* - * The NBD client can only really be considered idle when it has - * yielded from qio_channel_readv_all_eof(), waiting for data. This is - * the point where the additional scheduled coroutine entry happens - * after nbd_client_attach_aio_context(). - * - * Therefore we keep an additional in_flight reference all the time and - * only drop it temporarily here. - */ - - if (nbd_client_connecting(s)) { - nbd_co_reconnect_loop(s); + while (true) { + if (s->reply.handle == handle) { + /* We are done */ + return 0; } if (!nbd_client_connected(s)) { + return -EIO; + } + + if (s->reply.handle != 0) { + /* + * Some other request is being handled now. It should already be + * woken by whoever set s->reply.handle (or never wait in this + * yield). So, we should not wake it here. + */ + ind2 = HANDLE_TO_INDEX(s, s->reply.handle); + assert(!s->requests[ind2].receiving); + + s->requests[ind].receiving = true; + qemu_co_mutex_unlock(&s->receive_mutex); + + qemu_coroutine_yield(); + /* + * We may be woken for 3 reasons: + * 1. From this function, executing in parallel coroutine, when our + * handle is received. + * 2. From nbd_channel_error(), when connection is lost. + * 3. From nbd_co_receive_one_chunk(), when previous request is + * finished and s->reply.handle set to 0. + * Anyway, it's OK to lock the mutex and go to the next iteration. + */ + + qemu_co_mutex_lock(&s->receive_mutex); + assert(!s->requests[ind].receiving); continue; } + /* We are under mutex and handle is 0. We have to do the dirty work. */ assert(s->reply.handle == 0); - ret = nbd_receive_reply(s->bs, s->ioc, &s->reply, &local_err); - - if (local_err) { - trace_nbd_read_reply_entry_fail(ret, error_get_pretty(local_err)); - error_free(local_err); - local_err = NULL; - } + ret = nbd_receive_reply(s->bs, s->ioc, &s->reply, NULL); if (ret <= 0) { - nbd_channel_error(s, ret ? ret : -EIO); - continue; + ret = ret ? 
ret : -EIO; + nbd_channel_error(s, ret); + return ret; } - - /* - * There's no need for a mutex on the receive side, because the - * handler acts as a synchronization point and ensures that only - * one coroutine is called until the reply finishes. - */ - i = HANDLE_TO_INDEX(s, s->reply.handle); - if (i >= MAX_NBD_REQUESTS || - !s->requests[i].coroutine || - !s->requests[i].receiving || - (nbd_reply_is_structured(&s->reply) && !s->info.structured_reply)) - { + if (nbd_reply_is_structured(&s->reply) && !s->info.structured_reply) { nbd_channel_error(s, -EINVAL); - continue; + return -EINVAL; } - - /* - * We're woken up again by the request itself. Note that there - * is no race between yielding and reentering connection_co. This - * is because: - * - * - if the request runs on the same AioContext, it is only - * entered after we yield - * - * - if the request runs on a different AioContext, reentering - * connection_co happens through a bottom half, which can only - * run after we yield. - */ - s->requests[i].receiving = false; - aio_co_wake(s->requests[i].coroutine); - qemu_coroutine_yield(); + if (s->reply.handle == handle) { + /* We are done */ + return 0; + } + ind2 = HANDLE_TO_INDEX(s, s->reply.handle); + if (ind2 >= MAX_NBD_REQUESTS || !s->requests[ind2].reply_possible) { + nbd_channel_error(s, -EINVAL); + return -EINVAL; + } + nbd_recv_coroutine_wake_one(&s->requests[ind2]); } - - qemu_co_queue_restart_all(&s->free_sema); - nbd_recv_coroutines_wake_all(s); - bdrv_dec_in_flight(s->bs); - - s->connection_co = NULL; - if (s->ioc) { - qio_channel_detach_aio_context(QIO_CHANNEL(s->ioc)); - yank_unregister_function(BLOCKDEV_YANK_INSTANCE(s->bs->node_name), - nbd_yank, s->bs); - object_unref(OBJECT(s->ioc)); - s->ioc = NULL; - } - - if (s->teardown_co) { - aio_co_wake(s->teardown_co); - } - aio_wait_kick(); } static int nbd_co_send_request(BlockDriverState *bs, @@ -583,10 +432,17 @@ static int nbd_co_send_request(BlockDriverState *bs, int rc, i = -1; qemu_co_mutex_lock(&s->send_mutex); - while (s->in_flight == MAX_NBD_REQUESTS || nbd_client_connecting_wait(s)) { + + while (s->in_flight == MAX_NBD_REQUESTS || + (!nbd_client_connected(s) && s->in_flight > 0)) + { qemu_co_queue_wait(&s->free_sema, &s->send_mutex); } + if (nbd_client_connecting(s)) { + nbd_reconnect_attempt(s); + } + if (!nbd_client_connected(s)) { rc = -EIO; goto err; @@ -606,6 +462,7 @@ static int nbd_co_send_request(BlockDriverState *bs, s->requests[i].coroutine = qemu_coroutine_self(); s->requests[i].offset = request->from; s->requests[i].receiving = false; + s->requests[i].reply_possible = true; request->handle = INDEX_TO_HANDLE(s, i); @@ -633,10 +490,6 @@ err: if (i != -1) { s->requests[i].coroutine = NULL; s->in_flight--; - } - if (s->in_flight == 0 && s->wait_in_flight) { - aio_co_wake(s->connection_co); - } else { qemu_co_queue_next(&s->free_sema); } } @@ -935,10 +788,7 @@ static coroutine_fn int nbd_co_do_receive_one_chunk( } *request_ret = 0; - /* Wait until we're woken up by nbd_connection_entry. 
*/ - s->requests[i].receiving = true; - qemu_coroutine_yield(); - assert(!s->requests[i].receiving); + nbd_receive_replies(s, handle); if (!nbd_client_connected(s)) { error_setg(errp, "Connection closed"); return -EIO; @@ -1031,14 +881,7 @@ static coroutine_fn int nbd_co_receive_one_chunk( } s->reply.handle = 0; - if (s->connection_co && !s->wait_in_flight) { - /* - * We must check s->wait_in_flight, because we may entered by - * nbd_recv_coroutines_wake_all(), in this case we should not - * wake connection_co here, it will woken by last request. - */ - aio_co_wake(s->connection_co); - } + nbd_recv_coroutines_wake(s, false); return ret; } @@ -1149,11 +992,7 @@ break_loop: qemu_co_mutex_lock(&s->send_mutex); s->in_flight--; - if (s->in_flight == 0 && s->wait_in_flight) { - aio_co_wake(s->connection_co); - } else { - qemu_co_queue_next(&s->free_sema); - } + qemu_co_queue_next(&s->free_sema); qemu_co_mutex_unlock(&s->send_mutex); return false; @@ -1322,8 +1161,9 @@ static int nbd_co_request(BlockDriverState *bs, NBDRequest *request, return ret ? ret : request_ret; } -static int nbd_client_co_preadv(BlockDriverState *bs, uint64_t offset, - uint64_t bytes, QEMUIOVector *qiov, int flags) +static int nbd_client_co_preadv(BlockDriverState *bs, int64_t offset, + int64_t bytes, QEMUIOVector *qiov, + BdrvRequestFlags flags) { int ret, request_ret; Error *local_err = NULL; @@ -1380,8 +1220,9 @@ static int nbd_client_co_preadv(BlockDriverState *bs, uint64_t offset, return ret ? ret : request_ret; } -static int nbd_client_co_pwritev(BlockDriverState *bs, uint64_t offset, - uint64_t bytes, QEMUIOVector *qiov, int flags) +static int nbd_client_co_pwritev(BlockDriverState *bs, int64_t offset, + int64_t bytes, QEMUIOVector *qiov, + BdrvRequestFlags flags) { BDRVNBDState *s = (BDRVNBDState *)bs->opaque; NBDRequest request = { @@ -1405,15 +1246,17 @@ static int nbd_client_co_pwritev(BlockDriverState *bs, uint64_t offset, } static int nbd_client_co_pwrite_zeroes(BlockDriverState *bs, int64_t offset, - int bytes, BdrvRequestFlags flags) + int64_t bytes, BdrvRequestFlags flags) { BDRVNBDState *s = (BDRVNBDState *)bs->opaque; NBDRequest request = { .type = NBD_CMD_WRITE_ZEROES, .from = offset, - .len = bytes, + .len = bytes, /* .len is uint32_t actually */ }; + assert(bytes <= UINT32_MAX); /* rely on max_pwrite_zeroes */ + assert(!(s->info.flags & NBD_FLAG_READ_ONLY)); if (!(s->info.flags & NBD_FLAG_SEND_WRITE_ZEROES)) { return -ENOTSUP; @@ -1453,15 +1296,17 @@ static int nbd_client_co_flush(BlockDriverState *bs) } static int nbd_client_co_pdiscard(BlockDriverState *bs, int64_t offset, - int bytes) + int64_t bytes) { BDRVNBDState *s = (BDRVNBDState *)bs->opaque; NBDRequest request = { .type = NBD_CMD_TRIM, .from = offset, - .len = bytes, + .len = bytes, /* len is uint32_t */ }; + assert(bytes <= UINT32_MAX); /* rely on max_pdiscard */ + assert(!(s->info.flags & NBD_FLAG_READ_ONLY)); if (!(s->info.flags & NBD_FLAG_SEND_TRIM) || !bytes) { return 0; @@ -1969,6 +1814,7 @@ static int nbd_open(BlockDriverState *bs, QDict *options, int flags, s->bs = bs; qemu_co_mutex_init(&s->send_mutex); qemu_co_queue_init(&s->free_sema); + qemu_co_mutex_init(&s->receive_mutex); if (!yank_register_instance(BLOCKDEV_YANK_INSTANCE(bs->node_name), errp)) { return -EEXIST; @@ -1983,14 +1829,13 @@ static int nbd_open(BlockDriverState *bs, QDict *options, int flags, s->x_dirty_bitmap, s->tlscreds); /* TODO: Configurable retry-until-timeout behaviour. 
*/ + s->state = NBD_CLIENT_CONNECTING_WAIT; ret = nbd_do_establish_connection(bs, errp); if (ret < 0) { goto fail; } - s->connection_co = qemu_coroutine_create(nbd_connection_entry, s); - bdrv_inc_in_flight(bs); - aio_co_schedule(bdrv_get_aio_context(bs), s->connection_co); + nbd_client_connection_enable_retry(s->conn); return 0; @@ -2144,6 +1989,8 @@ static void nbd_cancel_in_flight(BlockDriverState *bs) s->state = NBD_CLIENT_CONNECTING_NOWAIT; qemu_co_queue_restart_all(&s->free_sema); } + + nbd_co_establish_connection_cancel(s->conn); } static BlockDriver bdrv_nbd = { @@ -2164,10 +2011,6 @@ static BlockDriver bdrv_nbd = { .bdrv_refresh_limits = nbd_refresh_limits, .bdrv_co_truncate = nbd_co_truncate, .bdrv_getlength = nbd_getlength, - .bdrv_detach_aio_context = nbd_client_detach_aio_context, - .bdrv_attach_aio_context = nbd_client_attach_aio_context, - .bdrv_co_drain_begin = nbd_client_co_drain_begin, - .bdrv_co_drain_end = nbd_client_co_drain_end, .bdrv_refresh_filename = nbd_refresh_filename, .bdrv_co_block_status = nbd_client_co_block_status, .bdrv_dirname = nbd_dirname, @@ -2193,10 +2036,6 @@ static BlockDriver bdrv_nbd_tcp = { .bdrv_refresh_limits = nbd_refresh_limits, .bdrv_co_truncate = nbd_co_truncate, .bdrv_getlength = nbd_getlength, - .bdrv_detach_aio_context = nbd_client_detach_aio_context, - .bdrv_attach_aio_context = nbd_client_attach_aio_context, - .bdrv_co_drain_begin = nbd_client_co_drain_begin, - .bdrv_co_drain_end = nbd_client_co_drain_end, .bdrv_refresh_filename = nbd_refresh_filename, .bdrv_co_block_status = nbd_client_co_block_status, .bdrv_dirname = nbd_dirname, @@ -2222,10 +2061,6 @@ static BlockDriver bdrv_nbd_unix = { .bdrv_refresh_limits = nbd_refresh_limits, .bdrv_co_truncate = nbd_co_truncate, .bdrv_getlength = nbd_getlength, - .bdrv_detach_aio_context = nbd_client_detach_aio_context, - .bdrv_attach_aio_context = nbd_client_attach_aio_context, - .bdrv_co_drain_begin = nbd_client_co_drain_begin, - .bdrv_co_drain_end = nbd_client_co_drain_end, .bdrv_refresh_filename = nbd_refresh_filename, .bdrv_co_block_status = nbd_client_co_block_status, .bdrv_dirname = nbd_dirname, diff --git a/block/nfs.c b/block/nfs.c index 9aeaefb364..577aea1d22 100644 --- a/block/nfs.c +++ b/block/nfs.c @@ -262,9 +262,9 @@ nfs_co_generic_cb(int ret, struct nfs_context *nfs, void *data, nfs_co_generic_bh_cb, task); } -static int coroutine_fn nfs_co_preadv(BlockDriverState *bs, uint64_t offset, - uint64_t bytes, QEMUIOVector *iov, - int flags) +static int coroutine_fn nfs_co_preadv(BlockDriverState *bs, int64_t offset, + int64_t bytes, QEMUIOVector *iov, + BdrvRequestFlags flags) { NFSClient *client = bs->opaque; NFSRPC task; @@ -296,9 +296,9 @@ static int coroutine_fn nfs_co_preadv(BlockDriverState *bs, uint64_t offset, return 0; } -static int coroutine_fn nfs_co_pwritev(BlockDriverState *bs, uint64_t offset, - uint64_t bytes, QEMUIOVector *iov, - int flags) +static int coroutine_fn nfs_co_pwritev(BlockDriverState *bs, int64_t offset, + int64_t bytes, QEMUIOVector *iov, + BdrvRequestFlags flags) { NFSClient *client = bs->opaque; NFSRPC task; diff --git a/block/null.c b/block/null.c index cc9b1d4ea7..75f7d0db40 100644 --- a/block/null.c +++ b/block/null.c @@ -116,8 +116,9 @@ static coroutine_fn int null_co_common(BlockDriverState *bs) } static coroutine_fn int null_co_preadv(BlockDriverState *bs, - uint64_t offset, uint64_t bytes, - QEMUIOVector *qiov, int flags) + int64_t offset, int64_t bytes, + QEMUIOVector *qiov, + BdrvRequestFlags flags) { BDRVNullState *s = bs->opaque; @@ -129,8 
+130,9 @@ static coroutine_fn int null_co_preadv(BlockDriverState *bs, } static coroutine_fn int null_co_pwritev(BlockDriverState *bs, - uint64_t offset, uint64_t bytes, - QEMUIOVector *qiov, int flags) + int64_t offset, int64_t bytes, + QEMUIOVector *qiov, + BdrvRequestFlags flags) { return null_co_common(bs); } @@ -187,8 +189,8 @@ static inline BlockAIOCB *null_aio_common(BlockDriverState *bs, } static BlockAIOCB *null_aio_preadv(BlockDriverState *bs, - uint64_t offset, uint64_t bytes, - QEMUIOVector *qiov, int flags, + int64_t offset, int64_t bytes, + QEMUIOVector *qiov, BdrvRequestFlags flags, BlockCompletionFunc *cb, void *opaque) { @@ -202,8 +204,8 @@ static BlockAIOCB *null_aio_preadv(BlockDriverState *bs, } static BlockAIOCB *null_aio_pwritev(BlockDriverState *bs, - uint64_t offset, uint64_t bytes, - QEMUIOVector *qiov, int flags, + int64_t offset, int64_t bytes, + QEMUIOVector *qiov, BdrvRequestFlags flags, BlockCompletionFunc *cb, void *opaque) { diff --git a/block/nvme.c b/block/nvme.c index abfe305baf..e4f336d79c 100644 --- a/block/nvme.c +++ b/block/nvme.c @@ -183,15 +183,20 @@ static bool nvme_init_queue(BDRVNVMeState *s, NVMeQueue *q, return r == 0; } +static void nvme_free_queue(NVMeQueue *q) +{ + qemu_vfree(q->queue); +} + static void nvme_free_queue_pair(NVMeQueuePair *q) { - trace_nvme_free_queue_pair(q->index, q); + trace_nvme_free_queue_pair(q->index, q, &q->cq, &q->sq); if (q->completion_bh) { qemu_bh_delete(q->completion_bh); } + nvme_free_queue(&q->sq); + nvme_free_queue(&q->cq); qemu_vfree(q->prp_list_pages); - qemu_vfree(q->sq.queue); - qemu_vfree(q->cq.queue); qemu_mutex_destroy(&q->lock); g_free(q); } @@ -514,10 +519,10 @@ static bool nvme_identify(BlockDriverState *bs, int namespace, Error **errp) { BDRVNVMeState *s = bs->opaque; bool ret = false; - union { + QEMU_AUTO_VFREE union { NvmeIdCtrl ctrl; NvmeIdNs ns; - } *id; + } *id = NULL; NvmeLBAF *lbaf; uint16_t oncs; int r; @@ -595,7 +600,6 @@ static bool nvme_identify(BlockDriverState *bs, int namespace, Error **errp) s->blkshift = lbaf->ds; out: qemu_vfio_dma_unmap(s->vfio, id); - qemu_vfree(id); return ret; } @@ -1219,7 +1223,7 @@ static int nvme_co_prw(BlockDriverState *bs, uint64_t offset, uint64_t bytes, { BDRVNVMeState *s = bs->opaque; int r; - uint8_t *buf = NULL; + QEMU_AUTO_VFREE uint8_t *buf = NULL; QEMUIOVector local_qiov; size_t len = QEMU_ALIGN_UP(bytes, qemu_real_host_page_size); assert(QEMU_IS_ALIGNED(offset, s->page_size)); @@ -1246,20 +1250,21 @@ static int nvme_co_prw(BlockDriverState *bs, uint64_t offset, uint64_t bytes, if (!r && !is_write) { qemu_iovec_from_buf(qiov, 0, buf, bytes); } - qemu_vfree(buf); return r; } static coroutine_fn int nvme_co_preadv(BlockDriverState *bs, - uint64_t offset, uint64_t bytes, - QEMUIOVector *qiov, int flags) + int64_t offset, int64_t bytes, + QEMUIOVector *qiov, + BdrvRequestFlags flags) { return nvme_co_prw(bs, offset, bytes, qiov, false, flags); } static coroutine_fn int nvme_co_pwritev(BlockDriverState *bs, - uint64_t offset, uint64_t bytes, - QEMUIOVector *qiov, int flags) + int64_t offset, int64_t bytes, + QEMUIOVector *qiov, + BdrvRequestFlags flags) { return nvme_co_prw(bs, offset, bytes, qiov, true, flags); } @@ -1294,19 +1299,29 @@ static coroutine_fn int nvme_co_flush(BlockDriverState *bs) static coroutine_fn int nvme_co_pwrite_zeroes(BlockDriverState *bs, int64_t offset, - int bytes, + int64_t bytes, BdrvRequestFlags flags) { BDRVNVMeState *s = bs->opaque; NVMeQueuePair *ioq = s->queues[INDEX_IO(0)]; NVMeRequest *req; - - uint32_t cdw12 = 
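Several nvme.c hunks above replace manual qemu_vfree() calls on every exit path with QEMU_AUTO_VFREE declarations, which release the buffer automatically when it goes out of scope. A minimal sketch of the same idea using the GCC/Clang cleanup attribute with plain malloc/free; QEMU's macro frees with qemu_vfree rather than free, so this is only an analogy:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void auto_free(void *p)
{
    /* p points at the variable itself, i.e. a char **. */
    free(*(void **)p);
}

/* Analogy for QEMU_AUTO_VFREE: free on scope exit, whatever the return path. */
#define AUTO_FREE __attribute__((cleanup(auto_free)))

static int use_buffer(int fail_early)
{
    AUTO_FREE char *buf = malloc(4096);
    if (!buf) {
        return -1;
    }
    if (fail_early) {
        return -1;        /* no explicit free() needed on this path either */
    }
    memset(buf, 0, 4096);
    return 0;
}

int main(void)
{
    printf("early failure: %d, success: %d\n", use_buffer(1), use_buffer(0));
    return 0;
}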
((bytes >> s->blkshift) - 1) & 0xFFFF; + uint32_t cdw12; if (!s->supports_write_zeroes) { return -ENOTSUP; } + if (bytes == 0) { + return 0; + } + + cdw12 = ((bytes >> s->blkshift) - 1) & 0xFFFF; + /* + * We should not lose information. pwrite_zeroes_alignment and + * max_pwrite_zeroes guarantees it. + */ + assert(((cdw12 + 1) << s->blkshift) == bytes); + NvmeCmd cmd = { .opcode = NVME_CMD_WRITE_ZEROES, .nsid = cpu_to_le32(s->nsid), @@ -1348,12 +1363,12 @@ static coroutine_fn int nvme_co_pwrite_zeroes(BlockDriverState *bs, static int coroutine_fn nvme_co_pdiscard(BlockDriverState *bs, int64_t offset, - int bytes) + int64_t bytes) { BDRVNVMeState *s = bs->opaque; NVMeQueuePair *ioq = s->queues[INDEX_IO(0)]; NVMeRequest *req; - NvmeDsmRange *buf; + QEMU_AUTO_VFREE NvmeDsmRange *buf = NULL; QEMUIOVector local_qiov; int ret; @@ -1375,6 +1390,14 @@ static int coroutine_fn nvme_co_pdiscard(BlockDriverState *bs, assert(s->queue_count > 1); + /* + * Filling the @buf requires @offset and @bytes to satisfy restrictions + * defined in nvme_refresh_limits(). + */ + assert(QEMU_IS_ALIGNED(bytes, 1UL << s->blkshift)); + assert(QEMU_IS_ALIGNED(offset, 1UL << s->blkshift)); + assert((bytes >> s->blkshift) <= UINT32_MAX); + buf = qemu_try_memalign(s->page_size, s->page_size); if (!buf) { return -ENOMEM; @@ -1420,7 +1443,6 @@ static int coroutine_fn nvme_co_pdiscard(BlockDriverState *bs, trace_nvme_dsm_done(s, offset, bytes, ret); out: qemu_iovec_destroy(&local_qiov); - qemu_vfree(buf); return ret; } @@ -1470,6 +1492,18 @@ static void nvme_refresh_limits(BlockDriverState *bs, Error **errp) bs->bl.opt_mem_alignment = s->page_size; bs->bl.request_alignment = s->page_size; bs->bl.max_transfer = s->max_transfer; + + /* + * Look at nvme_co_pwrite_zeroes: after shift and decrement we should get + * at most 0xFFFF + */ + bs->bl.max_pwrite_zeroes = 1ULL << (s->blkshift + 16); + bs->bl.pwrite_zeroes_alignment = MAX(bs->bl.request_alignment, + 1UL << s->blkshift); + + bs->bl.max_pdiscard = (uint64_t)UINT32_MAX << s->blkshift; + bs->bl.pdiscard_alignment = MAX(bs->bl.request_alignment, + 1UL << s->blkshift); } static void nvme_detach_aio_context(BlockDriverState *bs) diff --git a/block/preallocate.c b/block/preallocate.c index b619206304..1d4233f730 100644 --- a/block/preallocate.c +++ b/block/preallocate.c @@ -227,15 +227,15 @@ static void preallocate_reopen_abort(BDRVReopenState *state) } static coroutine_fn int preallocate_co_preadv_part( - BlockDriverState *bs, uint64_t offset, uint64_t bytes, - QEMUIOVector *qiov, size_t qiov_offset, int flags) + BlockDriverState *bs, int64_t offset, int64_t bytes, + QEMUIOVector *qiov, size_t qiov_offset, BdrvRequestFlags flags) { return bdrv_co_preadv_part(bs->file, offset, bytes, qiov, qiov_offset, flags); } static int coroutine_fn preallocate_co_pdiscard(BlockDriverState *bs, - int64_t offset, int bytes) + int64_t offset, int64_t bytes) { return bdrv_co_pdiscard(bs->file, offset, bytes); } @@ -337,7 +337,7 @@ static bool coroutine_fn handle_write(BlockDriverState *bs, int64_t offset, } static int coroutine_fn preallocate_co_pwrite_zeroes(BlockDriverState *bs, - int64_t offset, int bytes, BdrvRequestFlags flags) + int64_t offset, int64_t bytes, BdrvRequestFlags flags) { bool want_merge_zero = !(flags & ~(BDRV_REQ_ZERO_WRITE | BDRV_REQ_NO_FALLBACK)); @@ -349,11 +349,11 @@ static int coroutine_fn preallocate_co_pwrite_zeroes(BlockDriverState *bs, } static coroutine_fn int preallocate_co_pwritev_part(BlockDriverState *bs, - uint64_t offset, - uint64_t bytes, + int64_t offset, + 
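The new nvme_refresh_limits() values are chosen so the Write Zeroes length always survives the "(bytes >> blkshift) - 1" encoding into a 16-bit field: with max_pwrite_zeroes = 1ULL << (blkshift + 16) and the alignment at least one block, the assertion ((cdw12 + 1) << blkshift) == bytes cannot fire. A quick self-contained check of that round trip; the blkshift value is illustrative:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    unsigned blkshift = 9;                                 /* 512-byte blocks */
    uint64_t max_pwrite_zeroes = 1ULL << (blkshift + 16);  /* 32 MiB here */
    uint64_t align = 1ULL << blkshift;

    /* Walk every aligned length up to the limit and check the encoding. */
    for (uint64_t bytes = align; bytes <= max_pwrite_zeroes; bytes += align) {
        uint32_t cdw12 = ((bytes >> blkshift) - 1) & 0xFFFF;
        assert(((uint64_t)(cdw12 + 1) << blkshift) == bytes);
    }
    printf("no length is truncated below the limit\n");
    return 0;
}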
int64_t bytes, QEMUIOVector *qiov, size_t qiov_offset, - int flags) + BdrvRequestFlags flags) { handle_write(bs, offset, bytes, false); diff --git a/block/qcow.c b/block/qcow.c index f8919a44d1..c39940f33e 100644 --- a/block/qcow.c +++ b/block/qcow.c @@ -617,9 +617,9 @@ static void qcow_refresh_limits(BlockDriverState *bs, Error **errp) bs->bl.request_alignment = BDRV_SECTOR_SIZE; } -static coroutine_fn int qcow_co_preadv(BlockDriverState *bs, uint64_t offset, - uint64_t bytes, QEMUIOVector *qiov, - int flags) +static coroutine_fn int qcow_co_preadv(BlockDriverState *bs, int64_t offset, + int64_t bytes, QEMUIOVector *qiov, + BdrvRequestFlags flags) { BDRVQcowState *s = bs->opaque; int offset_in_cluster; @@ -714,9 +714,9 @@ static coroutine_fn int qcow_co_preadv(BlockDriverState *bs, uint64_t offset, return ret; } -static coroutine_fn int qcow_co_pwritev(BlockDriverState *bs, uint64_t offset, - uint64_t bytes, QEMUIOVector *qiov, - int flags) +static coroutine_fn int qcow_co_pwritev(BlockDriverState *bs, int64_t offset, + int64_t bytes, QEMUIOVector *qiov, + BdrvRequestFlags flags) { BDRVQcowState *s = bs->opaque; int offset_in_cluster; @@ -1047,8 +1047,8 @@ static int qcow_make_empty(BlockDriverState *bs) /* XXX: put compressed sectors first, then all the cluster aligned tables to avoid losing bytes in alignment */ static coroutine_fn int -qcow_co_pwritev_compressed(BlockDriverState *bs, uint64_t offset, - uint64_t bytes, QEMUIOVector *qiov) +qcow_co_pwritev_compressed(BlockDriverState *bs, int64_t offset, int64_t bytes, + QEMUIOVector *qiov) { BDRVQcowState *s = bs->opaque; z_stream strm; diff --git a/block/qcow2-cluster.c b/block/qcow2-cluster.c index 4ebb49a087..21884a1ab9 100644 --- a/block/qcow2-cluster.c +++ b/block/qcow2-cluster.c @@ -505,7 +505,20 @@ static int coroutine_fn do_perform_cow_read(BlockDriverState *bs, return -ENOMEDIUM; } - /* Call .bdrv_co_readv() directly instead of using the public block-layer + /* + * We never deal with requests that don't satisfy + * bdrv_check_qiov_request(), and aligning requests to clusters never + * breaks this condition. So, do some assertions before calling + * bs->drv->bdrv_co_preadv_part() which has int64_t arguments. + */ + assert(src_cluster_offset <= INT64_MAX); + assert(src_cluster_offset + offset_in_cluster <= INT64_MAX); + /* Cast qiov->size to uint64_t to silence a compiler warning on -m32 */ + assert((uint64_t)qiov->size <= INT64_MAX); + bdrv_check_qiov_request(src_cluster_offset + offset_in_cluster, qiov->size, + qiov, 0, &error_abort); + /* + * Call .bdrv_co_readv() directly instead of using the public block-layer * interface. This avoids double I/O throttling and request tracking, * which can lead to deadlock when block layer copy-on-read is enabled. 
*/ diff --git a/block/qcow2.c b/block/qcow2.c index 02f9f3e636..d509016756 100644 --- a/block/qcow2.c +++ b/block/qcow2.c @@ -2310,9 +2310,10 @@ static coroutine_fn int qcow2_co_preadv_task_entry(AioTask *task) } static coroutine_fn int qcow2_co_preadv_part(BlockDriverState *bs, - uint64_t offset, uint64_t bytes, + int64_t offset, int64_t bytes, QEMUIOVector *qiov, - size_t qiov_offset, int flags) + size_t qiov_offset, + BdrvRequestFlags flags) { BDRVQcow2State *s = bs->opaque; int ret = 0; @@ -2596,8 +2597,8 @@ static coroutine_fn int qcow2_co_pwritev_task_entry(AioTask *task) } static coroutine_fn int qcow2_co_pwritev_part( - BlockDriverState *bs, uint64_t offset, uint64_t bytes, - QEMUIOVector *qiov, size_t qiov_offset, int flags) + BlockDriverState *bs, int64_t offset, int64_t bytes, + QEMUIOVector *qiov, size_t qiov_offset, BdrvRequestFlags flags) { BDRVQcow2State *s = bs->opaque; int offset_in_cluster; @@ -3940,7 +3941,7 @@ static bool is_zero(BlockDriverState *bs, int64_t offset, int64_t bytes) } static coroutine_fn int qcow2_co_pwrite_zeroes(BlockDriverState *bs, - int64_t offset, int bytes, BdrvRequestFlags flags) + int64_t offset, int64_t bytes, BdrvRequestFlags flags) { int ret; BDRVQcow2State *s = bs->opaque; @@ -3995,7 +3996,7 @@ static coroutine_fn int qcow2_co_pwrite_zeroes(BlockDriverState *bs, } static coroutine_fn int qcow2_co_pdiscard(BlockDriverState *bs, - int64_t offset, int bytes) + int64_t offset, int64_t bytes) { int ret; BDRVQcow2State *s = bs->opaque; @@ -4025,9 +4026,9 @@ static coroutine_fn int qcow2_co_pdiscard(BlockDriverState *bs, static int coroutine_fn qcow2_co_copy_range_from(BlockDriverState *bs, - BdrvChild *src, uint64_t src_offset, - BdrvChild *dst, uint64_t dst_offset, - uint64_t bytes, BdrvRequestFlags read_flags, + BdrvChild *src, int64_t src_offset, + BdrvChild *dst, int64_t dst_offset, + int64_t bytes, BdrvRequestFlags read_flags, BdrvRequestFlags write_flags) { BDRVQcow2State *s = bs->opaque; @@ -4108,9 +4109,9 @@ out: static int coroutine_fn qcow2_co_copy_range_to(BlockDriverState *bs, - BdrvChild *src, uint64_t src_offset, - BdrvChild *dst, uint64_t dst_offset, - uint64_t bytes, BdrvRequestFlags read_flags, + BdrvChild *src, int64_t src_offset, + BdrvChild *dst, int64_t dst_offset, + int64_t bytes, BdrvRequestFlags read_flags, BdrvRequestFlags write_flags) { BDRVQcow2State *s = bs->opaque; @@ -4630,7 +4631,7 @@ static coroutine_fn int qcow2_co_pwritev_compressed_task_entry(AioTask *task) */ static coroutine_fn int qcow2_co_pwritev_compressed_part(BlockDriverState *bs, - uint64_t offset, uint64_t bytes, + int64_t offset, int64_t bytes, QEMUIOVector *qiov, size_t qiov_offset) { BDRVQcow2State *s = bs->opaque; @@ -5227,24 +5228,55 @@ static int qcow2_has_zero_init(BlockDriverState *bs) } } +/* + * Check the request to vmstate. 
On success return + * qcow2_vm_state_offset(bs) + @pos + */ +static int64_t qcow2_check_vmstate_request(BlockDriverState *bs, + QEMUIOVector *qiov, int64_t pos) +{ + BDRVQcow2State *s = bs->opaque; + int64_t vmstate_offset = qcow2_vm_state_offset(s); + int ret; + + /* Incoming requests must be OK */ + bdrv_check_qiov_request(pos, qiov->size, qiov, 0, &error_abort); + + if (INT64_MAX - pos < vmstate_offset) { + return -EIO; + } + + pos += vmstate_offset; + ret = bdrv_check_qiov_request(pos, qiov->size, qiov, 0, NULL); + if (ret < 0) { + return ret; + } + + return pos; +} + static int qcow2_save_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos) { - BDRVQcow2State *s = bs->opaque; + int64_t offset = qcow2_check_vmstate_request(bs, qiov, pos); + if (offset < 0) { + return offset; + } BLKDBG_EVENT(bs->file, BLKDBG_VMSTATE_SAVE); - return bs->drv->bdrv_co_pwritev_part(bs, qcow2_vm_state_offset(s) + pos, - qiov->size, qiov, 0, 0); + return bs->drv->bdrv_co_pwritev_part(bs, offset, qiov->size, qiov, 0, 0); } static int qcow2_load_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos) { - BDRVQcow2State *s = bs->opaque; + int64_t offset = qcow2_check_vmstate_request(bs, qiov, pos); + if (offset < 0) { + return offset; + } BLKDBG_EVENT(bs->file, BLKDBG_VMSTATE_LOAD); - return bs->drv->bdrv_co_preadv_part(bs, qcow2_vm_state_offset(s) + pos, - qiov->size, qiov, 0, 0); + return bs->drv->bdrv_co_preadv_part(bs, offset, qiov->size, qiov, 0, 0); } /* diff --git a/block/qed.c b/block/qed.c index f45c640513..558d3646c4 100644 --- a/block/qed.c +++ b/block/qed.c @@ -582,6 +582,7 @@ static void bdrv_qed_refresh_limits(BlockDriverState *bs, Error **errp) BDRVQEDState *s = bs->opaque; bs->bl.pwrite_zeroes_alignment = s->header.cluster_size; + bs->bl.max_pwrite_zeroes = QEMU_ALIGN_DOWN(INT_MAX, s->header.cluster_size); } /* We have nothing to do for QED reopen, stubs just return @@ -1397,7 +1398,7 @@ static int coroutine_fn bdrv_qed_co_writev(BlockDriverState *bs, static int coroutine_fn bdrv_qed_co_pwrite_zeroes(BlockDriverState *bs, int64_t offset, - int bytes, + int64_t bytes, BdrvRequestFlags flags) { BDRVQEDState *s = bs->opaque; @@ -1408,6 +1409,12 @@ static int coroutine_fn bdrv_qed_co_pwrite_zeroes(BlockDriverState *bs, */ QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, NULL, bytes); + /* + * QED is not prepared for 63bit write-zero requests, so rely on + * max_pwrite_zeroes. 
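A quick sanity check of that clamp (illustrative arithmetic only, assuming QED's default 64 KiB cluster size; not part of the patch): QEMU_ALIGN_DOWN rounds INT_MAX down to a cluster multiple,

    QEMU_ALIGN_DOWN(INT_MAX, 65536) == (2147483647 / 65536) * 65536
                                    == 32767 * 65536
                                    == 2147418112          /* 0x7fff0000 */

so every zero-write the block layer hands to QED is cluster-aligned in length and safely below INT_MAX, which is what the assertion added just below relies on.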
+ */ + assert(bytes <= INT_MAX); + /* Fall back if the request is not aligned */ if (qed_offset_into_cluster(s, offset) || qed_offset_into_cluster(s, bytes)) { diff --git a/block/quorum.c b/block/quorum.c index f2c0805000..c28dda7baa 100644 --- a/block/quorum.c +++ b/block/quorum.c @@ -663,8 +663,8 @@ static int read_fifo_child(QuorumAIOCB *acb) return ret; } -static int quorum_co_preadv(BlockDriverState *bs, uint64_t offset, - uint64_t bytes, QEMUIOVector *qiov, int flags) +static int quorum_co_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes, + QEMUIOVector *qiov, BdrvRequestFlags flags) { BDRVQuorumState *s = bs->opaque; QuorumAIOCB *acb = quorum_aio_get(bs, qiov, offset, bytes, flags); @@ -714,8 +714,9 @@ static void write_quorum_entry(void *opaque) } } -static int quorum_co_pwritev(BlockDriverState *bs, uint64_t offset, - uint64_t bytes, QEMUIOVector *qiov, int flags) +static int quorum_co_pwritev(BlockDriverState *bs, int64_t offset, + int64_t bytes, QEMUIOVector *qiov, + BdrvRequestFlags flags) { BDRVQuorumState *s = bs->opaque; QuorumAIOCB *acb = quorum_aio_get(bs, qiov, offset, bytes, flags); @@ -745,7 +746,7 @@ static int quorum_co_pwritev(BlockDriverState *bs, uint64_t offset, } static int quorum_co_pwrite_zeroes(BlockDriverState *bs, int64_t offset, - int bytes, BdrvRequestFlags flags) + int64_t bytes, BdrvRequestFlags flags) { return quorum_co_pwritev(bs, offset, bytes, NULL, diff --git a/block/raw-format.c b/block/raw-format.c index c26f493688..bda757fd19 100644 --- a/block/raw-format.c +++ b/block/raw-format.c @@ -181,8 +181,8 @@ static void raw_reopen_abort(BDRVReopenState *state) } /* Check and adjust the offset, against 'offset' and 'size' options. */ -static inline int raw_adjust_offset(BlockDriverState *bs, uint64_t *offset, - uint64_t bytes, bool is_write) +static inline int raw_adjust_offset(BlockDriverState *bs, int64_t *offset, + int64_t bytes, bool is_write) { BDRVRawState *s = bs->opaque; @@ -201,9 +201,9 @@ static inline int raw_adjust_offset(BlockDriverState *bs, uint64_t *offset, return 0; } -static int coroutine_fn raw_co_preadv(BlockDriverState *bs, uint64_t offset, - uint64_t bytes, QEMUIOVector *qiov, - int flags) +static int coroutine_fn raw_co_preadv(BlockDriverState *bs, int64_t offset, + int64_t bytes, QEMUIOVector *qiov, + BdrvRequestFlags flags) { int ret; @@ -216,9 +216,9 @@ static int coroutine_fn raw_co_preadv(BlockDriverState *bs, uint64_t offset, return bdrv_co_preadv(bs->file, offset, bytes, qiov, flags); } -static int coroutine_fn raw_co_pwritev(BlockDriverState *bs, uint64_t offset, - uint64_t bytes, QEMUIOVector *qiov, - int flags) +static int coroutine_fn raw_co_pwritev(BlockDriverState *bs, int64_t offset, + int64_t bytes, QEMUIOVector *qiov, + BdrvRequestFlags flags) { void *buf = NULL; BlockDriver *drv; @@ -289,12 +289,12 @@ static int coroutine_fn raw_co_block_status(BlockDriverState *bs, } static int coroutine_fn raw_co_pwrite_zeroes(BlockDriverState *bs, - int64_t offset, int bytes, + int64_t offset, int64_t bytes, BdrvRequestFlags flags) { int ret; - ret = raw_adjust_offset(bs, (uint64_t *)&offset, bytes, true); + ret = raw_adjust_offset(bs, &offset, bytes, true); if (ret) { return ret; } @@ -302,11 +302,11 @@ static int coroutine_fn raw_co_pwrite_zeroes(BlockDriverState *bs, } static int coroutine_fn raw_co_pdiscard(BlockDriverState *bs, - int64_t offset, int bytes) + int64_t offset, int64_t bytes) { int ret; - ret = raw_adjust_offset(bs, (uint64_t *)&offset, bytes, true); + ret = raw_adjust_offset(bs, &offset, bytes, 
true); if (ret) { return ret; } @@ -532,10 +532,10 @@ static int raw_probe_geometry(BlockDriverState *bs, HDGeometry *geo) static int coroutine_fn raw_co_copy_range_from(BlockDriverState *bs, BdrvChild *src, - uint64_t src_offset, + int64_t src_offset, BdrvChild *dst, - uint64_t dst_offset, - uint64_t bytes, + int64_t dst_offset, + int64_t bytes, BdrvRequestFlags read_flags, BdrvRequestFlags write_flags) { @@ -551,10 +551,10 @@ static int coroutine_fn raw_co_copy_range_from(BlockDriverState *bs, static int coroutine_fn raw_co_copy_range_to(BlockDriverState *bs, BdrvChild *src, - uint64_t src_offset, + int64_t src_offset, BdrvChild *dst, - uint64_t dst_offset, - uint64_t bytes, + int64_t dst_offset, + int64_t bytes, BdrvRequestFlags read_flags, BdrvRequestFlags write_flags) { diff --git a/block/rbd.c b/block/rbd.c index dcf82b15b8..def96292e0 100644 --- a/block/rbd.c +++ b/block/rbd.c @@ -97,6 +97,12 @@ typedef struct RBDTask { int64_t ret; } RBDTask; +typedef struct RBDDiffIterateReq { + uint64_t offs; + uint64_t bytes; + bool exists; +} RBDDiffIterateReq; + static int qemu_rbd_connect(rados_t *cluster, rados_ioctx_t *io_ctx, BlockdevOptionsRbd *opts, bool cache, const char *keypairs, const char *secretid, @@ -1164,17 +1170,17 @@ static int coroutine_fn qemu_rbd_start_co(BlockDriverState *bs, } static int -coroutine_fn qemu_rbd_co_preadv(BlockDriverState *bs, uint64_t offset, - uint64_t bytes, QEMUIOVector *qiov, - int flags) +coroutine_fn qemu_rbd_co_preadv(BlockDriverState *bs, int64_t offset, + int64_t bytes, QEMUIOVector *qiov, + BdrvRequestFlags flags) { return qemu_rbd_start_co(bs, offset, bytes, qiov, flags, RBD_AIO_READ); } static int -coroutine_fn qemu_rbd_co_pwritev(BlockDriverState *bs, uint64_t offset, - uint64_t bytes, QEMUIOVector *qiov, - int flags) +coroutine_fn qemu_rbd_co_pwritev(BlockDriverState *bs, int64_t offset, + int64_t bytes, QEMUIOVector *qiov, + BdrvRequestFlags flags) { BDRVRBDState *s = bs->opaque; /* @@ -1197,17 +1203,17 @@ static int coroutine_fn qemu_rbd_co_flush(BlockDriverState *bs) } static int coroutine_fn qemu_rbd_co_pdiscard(BlockDriverState *bs, - int64_t offset, int count) + int64_t offset, int64_t bytes) { - return qemu_rbd_start_co(bs, offset, count, NULL, 0, RBD_AIO_DISCARD); + return qemu_rbd_start_co(bs, offset, bytes, NULL, 0, RBD_AIO_DISCARD); } #ifdef LIBRBD_SUPPORTS_WRITE_ZEROES static int coroutine_fn qemu_rbd_co_pwrite_zeroes(BlockDriverState *bs, int64_t offset, - int count, BdrvRequestFlags flags) + int64_t bytes, BdrvRequestFlags flags) { - return qemu_rbd_start_co(bs, offset, count, NULL, flags, + return qemu_rbd_start_co(bs, offset, bytes, NULL, flags, RBD_AIO_WRITE_ZEROES); } #endif @@ -1259,6 +1265,111 @@ static ImageInfoSpecific *qemu_rbd_get_specific_info(BlockDriverState *bs, return spec_info; } +/* + * rbd_diff_iterate2 allows to interrupt the exection by returning a negative + * value in the callback routine. Choose a value that does not conflict with + * an existing exitcode and return it if we want to prematurely stop the + * execution because we detected a change in the allocation status. + */ +#define QEMU_RBD_EXIT_DIFF_ITERATE2 -9000 + +static int qemu_rbd_diff_iterate_cb(uint64_t offs, size_t len, + int exists, void *opaque) +{ + RBDDiffIterateReq *req = opaque; + + assert(req->offs + req->bytes <= offs); + /* + * we do not diff against a snapshot so we should never receive a callback + * for a hole. 
+ */ + assert(exists); + + if (!req->exists && offs > req->offs) { + /* + * we started in an unallocated area and hit the first allocated + * block. req->bytes must be set to the length of the unallocated area + * before the allocated area. stop further processing. + */ + req->bytes = offs - req->offs; + return QEMU_RBD_EXIT_DIFF_ITERATE2; + } + + if (req->exists && offs > req->offs + req->bytes) { + /* + * we started in an allocated area and jumped over an unallocated area, + * req->bytes contains the length of the allocated area before the + * unallocated area. stop further processing. + */ + return QEMU_RBD_EXIT_DIFF_ITERATE2; + } + + req->bytes += len; + req->exists = true; + + return 0; +} + +static int coroutine_fn qemu_rbd_co_block_status(BlockDriverState *bs, + bool want_zero, int64_t offset, + int64_t bytes, int64_t *pnum, + int64_t *map, + BlockDriverState **file) +{ + BDRVRBDState *s = bs->opaque; + int status, r; + RBDDiffIterateReq req = { .offs = offset }; + uint64_t features, flags; + + assert(offset + bytes <= s->image_size); + + /* default to all sectors allocated */ + status = BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID; + *map = offset; + *file = bs; + *pnum = bytes; + + /* check if RBD image supports fast-diff */ + r = rbd_get_features(s->image, &features); + if (r < 0) { + return status; + } + if (!(features & RBD_FEATURE_FAST_DIFF)) { + return status; + } + + /* check if RBD fast-diff result is valid */ + r = rbd_get_flags(s->image, &flags); + if (r < 0) { + return status; + } + if (flags & RBD_FLAG_FAST_DIFF_INVALID) { + return status; + } + + r = rbd_diff_iterate2(s->image, NULL, offset, bytes, true, true, + qemu_rbd_diff_iterate_cb, &req); + if (r < 0 && r != QEMU_RBD_EXIT_DIFF_ITERATE2) { + return status; + } + assert(req.bytes <= bytes); + if (!req.exists) { + if (r == 0) { + /* + * rbd_diff_iterate2 does not invoke callbacks for unallocated + * areas. This here catches the case where no callback was + * invoked at all (req.bytes == 0). + */ + assert(req.bytes == 0); + req.bytes = bytes; + } + status = BDRV_BLOCK_ZERO | BDRV_BLOCK_OFFSET_VALID; + } + + *pnum = req.bytes; + return status; +} + static int64_t qemu_rbd_getlength(BlockDriverState *bs) { BDRVRBDState *s = bs->opaque; @@ -1494,6 +1605,7 @@ static BlockDriver bdrv_rbd = { #ifdef LIBRBD_SUPPORTS_WRITE_ZEROES .bdrv_co_pwrite_zeroes = qemu_rbd_co_pwrite_zeroes, #endif + .bdrv_co_block_status = qemu_rbd_co_block_status, .bdrv_snapshot_create = qemu_rbd_snap_create, .bdrv_snapshot_delete = qemu_rbd_snap_remove, diff --git a/block/replication.c b/block/replication.c index 32444b9a8f..55c8f894aa 100644 --- a/block/replication.c +++ b/block/replication.c @@ -149,7 +149,7 @@ static void replication_close(BlockDriverState *bs) if (s->stage == BLOCK_REPLICATION_FAILOVER) { commit_job = &s->commit_job->job; assert(commit_job->aio_context == qemu_get_current_aio_context()); - job_cancel_sync(commit_job); + job_cancel_sync(commit_job, false); } if (s->mode == REPLICATION_MODE_SECONDARY) { @@ -726,7 +726,7 @@ static void replication_stop(ReplicationState *rs, bool failover, Error **errp) * disk, secondary disk in backup_job_completed(). 
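The job_cancel_sync() calls in this file (and later in blockdev.c) now carry an explicit force flag. A minimal sketch of the two call styles, with the gloss in the comments being my reading of this series rather than text from the patch (job is a Job *):

    job_cancel_sync(job, false);  /* non-force: a READY job may settle/complete instead of aborting */
    job_cancel_sync(job, true);   /* force: tear the job down immediately */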
*/ if (s->backup_job) { - job_cancel_sync(&s->backup_job->job); + job_cancel_sync(&s->backup_job->job, true); } if (!failover) { diff --git a/block/throttle.c b/block/throttle.c index b685166ad4..6e8d52fa24 100644 --- a/block/throttle.c +++ b/block/throttle.c @@ -112,8 +112,9 @@ static int64_t throttle_getlength(BlockDriverState *bs) } static int coroutine_fn throttle_co_preadv(BlockDriverState *bs, - uint64_t offset, uint64_t bytes, - QEMUIOVector *qiov, int flags) + int64_t offset, int64_t bytes, + QEMUIOVector *qiov, + BdrvRequestFlags flags) { ThrottleGroupMember *tgm = bs->opaque; @@ -123,8 +124,9 @@ static int coroutine_fn throttle_co_preadv(BlockDriverState *bs, } static int coroutine_fn throttle_co_pwritev(BlockDriverState *bs, - uint64_t offset, uint64_t bytes, - QEMUIOVector *qiov, int flags) + int64_t offset, int64_t bytes, + QEMUIOVector *qiov, + BdrvRequestFlags flags) { ThrottleGroupMember *tgm = bs->opaque; throttle_group_co_io_limits_intercept(tgm, bytes, true); @@ -133,7 +135,7 @@ static int coroutine_fn throttle_co_pwritev(BlockDriverState *bs, } static int coroutine_fn throttle_co_pwrite_zeroes(BlockDriverState *bs, - int64_t offset, int bytes, + int64_t offset, int64_t bytes, BdrvRequestFlags flags) { ThrottleGroupMember *tgm = bs->opaque; @@ -143,7 +145,7 @@ static int coroutine_fn throttle_co_pwrite_zeroes(BlockDriverState *bs, } static int coroutine_fn throttle_co_pdiscard(BlockDriverState *bs, - int64_t offset, int bytes) + int64_t offset, int64_t bytes) { ThrottleGroupMember *tgm = bs->opaque; throttle_group_co_io_limits_intercept(tgm, bytes, true); @@ -152,8 +154,8 @@ static int coroutine_fn throttle_co_pdiscard(BlockDriverState *bs, } static int coroutine_fn throttle_co_pwritev_compressed(BlockDriverState *bs, - uint64_t offset, - uint64_t bytes, + int64_t offset, + int64_t bytes, QEMUIOVector *qiov) { return throttle_co_pwritev(bs, offset, bytes, qiov, diff --git a/block/trace-events b/block/trace-events index f4f1267c8c..549090d453 100644 --- a/block/trace-events +++ b/block/trace-events @@ -5,8 +5,8 @@ bdrv_open_common(void *bs, const char *filename, int flags, const char *format_n bdrv_lock_medium(void *bs, bool locked) "bs %p locked %d" # block-backend.c -blk_co_preadv(void *blk, void *bs, int64_t offset, unsigned int bytes, int flags) "blk %p bs %p offset %"PRId64" bytes %u flags 0x%x" -blk_co_pwritev(void *blk, void *bs, int64_t offset, unsigned int bytes, int flags) "blk %p bs %p offset %"PRId64" bytes %u flags 0x%x" +blk_co_preadv(void *blk, void *bs, int64_t offset, int64_t bytes, int flags) "blk %p bs %p offset %"PRId64" bytes %" PRId64 " flags 0x%x" +blk_co_pwritev(void *blk, void *bs, int64_t offset, int64_t bytes, int flags) "blk %p bs %p offset %"PRId64" bytes %" PRId64 " flags 0x%x" blk_root_attach(void *child, void *blk, void *bs) "child %p blk %p bs %p" blk_root_detach(void *child, void *blk, void *bs) "child %p blk %p bs %p" @@ -75,13 +75,13 @@ luring_resubmit_short_read(void *s, void *luringcb, int nread) "LuringState %p l # qcow2.c qcow2_add_task(void *co, void *bs, void *pool, const char *action, int cluster_type, uint64_t host_offset, uint64_t offset, uint64_t bytes, void *qiov, size_t qiov_offset) "co %p bs %p pool %p: %s: cluster_type %d file_cluster_offset %" PRIu64 " offset %" PRIu64 " bytes %" PRIu64 " qiov %p qiov_offset %zu" -qcow2_writev_start_req(void *co, int64_t offset, int bytes) "co %p offset 0x%" PRIx64 " bytes %d" +qcow2_writev_start_req(void *co, int64_t offset, int64_t bytes) "co %p offset 0x%" PRIx64 " bytes %" PRId64 
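Because the byte counts in these trace points are now int64_t, the format strings switch from %d/%u to the <inttypes.h> width macros. For anyone unfamiliar with the idiom, adjacent string literals are concatenated at compile time, so a format such as "bytes %" PRId64 prints a 64-bit value portably (illustrative snippet, not from the patch):

    #include <inttypes.h>
    #include <stdio.h>

    int64_t bytes = 2147483648LL;           /* larger than INT_MAX, still prints correctly */
    printf("bytes %" PRId64 "\n", bytes);   /* PRId64 expands to e.g. "lld" or "ld" */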
qcow2_writev_done_req(void *co, int ret) "co %p ret %d" qcow2_writev_start_part(void *co) "co %p" qcow2_writev_done_part(void *co, int cur_bytes) "co %p cur_bytes %d" qcow2_writev_data(void *co, uint64_t offset) "co %p offset 0x%" PRIx64 -qcow2_pwrite_zeroes_start_req(void *co, int64_t offset, int count) "co %p offset 0x%" PRIx64 " count %d" -qcow2_pwrite_zeroes(void *co, int64_t offset, int count) "co %p offset 0x%" PRIx64 " count %d" +qcow2_pwrite_zeroes_start_req(void *co, int64_t offset, int64_t bytes) "co %p offset 0x%" PRIx64 " bytes %" PRId64 +qcow2_pwrite_zeroes(void *co, int64_t offset, int64_t bytes) "co %p offset 0x%" PRIx64 " bytes %" PRId64 qcow2_skip_cow(void *co, uint64_t offset, int nb_clusters) "co %p offset 0x%" PRIx64 " nb_clusters %d" # qcow2-cluster.c @@ -152,12 +152,12 @@ nvme_write_zeroes(void *s, uint64_t offset, uint64_t bytes, int flags) "s %p off nvme_qiov_unaligned(const void *qiov, int n, void *base, size_t size, int align) "qiov %p n %d base %p size 0x%zx align 0x%x" nvme_prw_buffered(void *s, uint64_t offset, uint64_t bytes, int niov, int is_write) "s %p offset 0x%"PRIx64" bytes %"PRId64" niov %d is_write %d" nvme_rw_done(void *s, int is_write, uint64_t offset, uint64_t bytes, int ret) "s %p is_write %d offset 0x%"PRIx64" bytes %"PRId64" ret %d" -nvme_dsm(void *s, uint64_t offset, uint64_t bytes) "s %p offset 0x%"PRIx64" bytes %"PRId64"" -nvme_dsm_done(void *s, uint64_t offset, uint64_t bytes, int ret) "s %p offset 0x%"PRIx64" bytes %"PRId64" ret %d" +nvme_dsm(void *s, int64_t offset, int64_t bytes) "s %p offset 0x%"PRIx64" bytes %"PRId64"" +nvme_dsm_done(void *s, int64_t offset, int64_t bytes, int ret) "s %p offset 0x%"PRIx64" bytes %"PRId64" ret %d" nvme_dma_map_flush(void *s) "s %p" nvme_free_req_queue_wait(void *s, unsigned q_index) "s %p q #%u" nvme_create_queue_pair(unsigned q_index, void *q, size_t size, void *aio_context, int fd) "index %u q %p size %zu aioctx %p fd %d" -nvme_free_queue_pair(unsigned q_index, void *q) "index %u q %p" +nvme_free_queue_pair(unsigned q_index, void *q, void *cq, void *sq) "index %u q %p cq %p sq %p" nvme_cmd_map_qiov(void *s, void *cmd, void *req, void *qiov, int entries) "s %p cmd %p req %p qiov %p entries %d" nvme_cmd_map_qiov_pages(void *s, int i, uint64_t page) "s %p page[%d] 0x%"PRIx64 nvme_cmd_map_qiov_iov(void *s, int i, void *page, int pages) "s %p iov[%d] %p pages %d" diff --git a/block/vdi.c b/block/vdi.c index 548f8a057b..bdc58d726e 100644 --- a/block/vdi.c +++ b/block/vdi.c @@ -544,8 +544,8 @@ static int coroutine_fn vdi_co_block_status(BlockDriverState *bs, } static int coroutine_fn -vdi_co_preadv(BlockDriverState *bs, uint64_t offset, uint64_t bytes, - QEMUIOVector *qiov, int flags) +vdi_co_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes, + QEMUIOVector *qiov, BdrvRequestFlags flags) { BDRVVdiState *s = bs->opaque; QEMUIOVector local_qiov; @@ -600,8 +600,8 @@ vdi_co_preadv(BlockDriverState *bs, uint64_t offset, uint64_t bytes, } static int coroutine_fn -vdi_co_pwritev(BlockDriverState *bs, uint64_t offset, uint64_t bytes, - QEMUIOVector *qiov, int flags) +vdi_co_pwritev(BlockDriverState *bs, int64_t offset, int64_t bytes, + QEMUIOVector *qiov, BdrvRequestFlags flags) { BDRVVdiState *s = bs->opaque; QEMUIOVector local_qiov; diff --git a/block/vmdk.c b/block/vmdk.c index 4499f136bd..0dfab6e941 100644 --- a/block/vmdk.c +++ b/block/vmdk.c @@ -60,6 +60,7 @@ #define VMDK_ZEROED (-3) #define BLOCK_OPT_ZEROED_GRAIN "zeroed_grain" +#define BLOCK_OPT_TOOLSVERSION "toolsversion" typedef struct { uint32_t 
version; @@ -1888,8 +1889,8 @@ static int vmdk_read_extent(VmdkExtent *extent, int64_t cluster_offset, } static int coroutine_fn -vmdk_co_preadv(BlockDriverState *bs, uint64_t offset, uint64_t bytes, - QEMUIOVector *qiov, int flags) +vmdk_co_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes, + QEMUIOVector *qiov, BdrvRequestFlags flags) { BDRVVmdkState *s = bs->opaque; int ret; @@ -2068,8 +2069,8 @@ static int vmdk_pwritev(BlockDriverState *bs, uint64_t offset, } static int coroutine_fn -vmdk_co_pwritev(BlockDriverState *bs, uint64_t offset, uint64_t bytes, - QEMUIOVector *qiov, int flags) +vmdk_co_pwritev(BlockDriverState *bs, int64_t offset, int64_t bytes, + QEMUIOVector *qiov, BdrvRequestFlags flags) { int ret; BDRVVmdkState *s = bs->opaque; @@ -2080,8 +2081,8 @@ vmdk_co_pwritev(BlockDriverState *bs, uint64_t offset, uint64_t bytes, } static int coroutine_fn -vmdk_co_pwritev_compressed(BlockDriverState *bs, uint64_t offset, - uint64_t bytes, QEMUIOVector *qiov) +vmdk_co_pwritev_compressed(BlockDriverState *bs, int64_t offset, int64_t bytes, + QEMUIOVector *qiov) { if (bytes == 0) { /* The caller will write bytes 0 to signal EOF. @@ -2109,7 +2110,7 @@ vmdk_co_pwritev_compressed(BlockDriverState *bs, uint64_t offset, static int coroutine_fn vmdk_co_pwrite_zeroes(BlockDriverState *bs, int64_t offset, - int bytes, + int64_t bytes, BdrvRequestFlags flags) { int ret; @@ -2344,6 +2345,7 @@ static int coroutine_fn vmdk_co_do_create(int64_t size, BlockdevVmdkAdapterType adapter_type, const char *backing_file, const char *hw_version, + const char *toolsversion, bool compat6, bool zeroed_grain, vmdk_create_extent_fn extent_fn, @@ -2384,7 +2386,8 @@ static int coroutine_fn vmdk_co_do_create(int64_t size, "ddb.geometry.cylinders = \"%" PRId64 "\"\n" "ddb.geometry.heads = \"%" PRIu32 "\"\n" "ddb.geometry.sectors = \"63\"\n" - "ddb.adapterType = \"%s\"\n"; + "ddb.adapterType = \"%s\"\n" + "ddb.toolsVersion = \"%s\"\n"; ext_desc_lines = g_string_new(NULL); @@ -2401,6 +2404,9 @@ static int coroutine_fn vmdk_co_do_create(int64_t size, if (!hw_version) { hw_version = "4"; } + if (!toolsversion) { + toolsversion = "2147483647"; + } if (adapter_type != BLOCKDEV_VMDK_ADAPTER_TYPE_IDE) { /* that's the number of heads with which vmware operates when @@ -2525,7 +2531,8 @@ static int coroutine_fn vmdk_co_do_create(int64_t size, size / (int64_t)(63 * number_heads * BDRV_SECTOR_SIZE), number_heads, - BlockdevVmdkAdapterType_str(adapter_type)); + BlockdevVmdkAdapterType_str(adapter_type), + toolsversion); desc_len = strlen(desc); /* the descriptor offset = 0x200 */ if (!split && !flat) { @@ -2617,6 +2624,7 @@ static int coroutine_fn vmdk_co_create_opts(BlockDriver *drv, BlockdevVmdkAdapterType adapter_type_enum; char *backing_file = NULL; char *hw_version = NULL; + char *toolsversion = NULL; char *fmt = NULL; BlockdevVmdkSubformat subformat; int ret = 0; @@ -2649,6 +2657,7 @@ static int coroutine_fn vmdk_co_create_opts(BlockDriver *drv, adapter_type = qemu_opt_get_del(opts, BLOCK_OPT_ADAPTER_TYPE); backing_file = qemu_opt_get_del(opts, BLOCK_OPT_BACKING_FILE); hw_version = qemu_opt_get_del(opts, BLOCK_OPT_HWVERSION); + toolsversion = qemu_opt_get_del(opts, BLOCK_OPT_TOOLSVERSION); compat6 = qemu_opt_get_bool_del(opts, BLOCK_OPT_COMPAT6, false); if (strcmp(hw_version, "undefined") == 0) { g_free(hw_version); @@ -2692,14 +2701,15 @@ static int coroutine_fn vmdk_co_create_opts(BlockDriver *drv, .opts = opts, }; ret = vmdk_co_do_create(total_size, subformat, adapter_type_enum, - backing_file, hw_version, 
compat6, zeroed_grain, - vmdk_co_create_opts_cb, &data, errp); + backing_file, hw_version, toolsversion, compat6, + zeroed_grain, vmdk_co_create_opts_cb, &data, errp); exit: g_free(backing_fmt); g_free(adapter_type); g_free(backing_file); g_free(hw_version); + g_free(toolsversion); g_free(fmt); g_free(desc); g_free(path); @@ -2782,6 +2792,7 @@ static int coroutine_fn vmdk_co_create(BlockdevCreateOptions *create_options, opts->adapter_type, opts->backing_file, opts->hwversion, + opts->toolsversion, false, opts->zeroed_grain, vmdk_co_create_cb, @@ -3031,6 +3042,11 @@ static QemuOptsList vmdk_create_opts = { .help = "VMDK hardware version", .def_value_str = "undefined" }, + { + .name = BLOCK_OPT_TOOLSVERSION, + .type = QEMU_OPT_STRING, + .help = "VMware guest tools version", + }, { .name = BLOCK_OPT_SUBFMT, .type = QEMU_OPT_STRING, diff --git a/block/vpc.c b/block/vpc.c index 17a705b482..297a26262a 100644 --- a/block/vpc.c +++ b/block/vpc.c @@ -276,7 +276,8 @@ static int vpc_open(BlockDriverState *bs, QDict *options, int flags, if (ret < 0) { goto fail; } - if (strncmp(footer->creator, "conectix", 8)) { + if (strncmp(footer->creator, "conectix", 8) || + be32_to_cpu(footer->type) != VHD_FIXED) { error_setg(errp, "invalid VPC image"); ret = -EINVAL; goto fail; @@ -608,8 +609,8 @@ static int vpc_get_info(BlockDriverState *bs, BlockDriverInfo *bdi) } static int coroutine_fn -vpc_co_preadv(BlockDriverState *bs, uint64_t offset, uint64_t bytes, - QEMUIOVector *qiov, int flags) +vpc_co_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes, + QEMUIOVector *qiov, BdrvRequestFlags flags) { BDRVVPCState *s = bs->opaque; int ret; @@ -658,8 +659,8 @@ fail: } static int coroutine_fn -vpc_co_pwritev(BlockDriverState *bs, uint64_t offset, uint64_t bytes, - QEMUIOVector *qiov, int flags) +vpc_co_pwritev(BlockDriverState *bs, int64_t offset, int64_t bytes, + QEMUIOVector *qiov, BdrvRequestFlags flags) { BDRVVPCState *s = bs->opaque; int64_t image_offset; diff --git a/block/vvfat.c b/block/vvfat.c index 34bf1e3a86..05e78e3c27 100644 --- a/block/vvfat.c +++ b/block/vvfat.c @@ -1522,8 +1522,8 @@ static int vvfat_read(BlockDriverState *bs, int64_t sector_num, } static int coroutine_fn -vvfat_co_preadv(BlockDriverState *bs, uint64_t offset, uint64_t bytes, - QEMUIOVector *qiov, int flags) +vvfat_co_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes, + QEMUIOVector *qiov, BdrvRequestFlags flags) { int ret; BDRVVVFATState *s = bs->opaque; @@ -3061,8 +3061,8 @@ DLOG(checkpoint()); } static int coroutine_fn -vvfat_co_pwritev(BlockDriverState *bs, uint64_t offset, uint64_t bytes, - QEMUIOVector *qiov, int flags) +vvfat_co_pwritev(BlockDriverState *bs, int64_t offset, int64_t bytes, + QEMUIOVector *qiov, BdrvRequestFlags flags) { int ret; BDRVVVFATState *s = bs->opaque; diff --git a/blockdev.c b/blockdev.c index e79c5f3b5e..b35072644e 100644 --- a/blockdev.c +++ b/blockdev.c @@ -1847,7 +1847,7 @@ static void drive_backup_abort(BlkActionState *common) aio_context = bdrv_get_aio_context(state->bs); aio_context_acquire(aio_context); - job_cancel_sync(&state->job->job); + job_cancel_sync(&state->job->job, true); aio_context_release(aio_context); } @@ -1948,7 +1948,7 @@ static void blockdev_backup_abort(BlkActionState *common) aio_context = bdrv_get_aio_context(state->bs); aio_context_acquire(aio_context); - job_cancel_sync(&state->job->job); + job_cancel_sync(&state->job->job, true); aio_context_release(aio_context); } diff --git a/bsd-user/errno_defs.h b/bsd-user/errno_defs.h index 1efa502a12..832671354f 100644 
--- a/bsd-user/errno_defs.h +++ b/bsd-user/errno_defs.h @@ -1,6 +1,3 @@ -/* $OpenBSD: errno.h,v 1.20 2007/09/03 14:37:52 millert Exp $ */ -/* $NetBSD: errno.h,v 1.10 1996/01/20 01:33:53 jtc Exp $ */ - /* * Copyright (c) 1982, 1986, 1989, 1993 * The Regents of the University of California. All rights reserved. @@ -37,6 +34,9 @@ * @(#)errno.h 8.5 (Berkeley) 1/21/94 */ +#ifndef _ERRNO_DEFS_H_ +#define _ERRNO_DEFS_H_ + #define TARGET_EPERM 1 /* Operation not permitted */ #define TARGET_ENOENT 2 /* No such file or directory */ #define TARGET_ESRCH 3 /* No such process */ @@ -147,3 +147,10 @@ #define TARGET_EIDRM 89 /* Identifier removed */ #define TARGET_ENOMSG 90 /* No message of desired type */ #define TARGET_ELAST 90 /* Must be equal largest errno */ + +/* Internal errors: */ +#define TARGET_EJUSTRETURN 254 /* Just return without modifing regs */ +#define TARGET_ERESTART 255 /* Restart syscall */ +#define TARGET_ERESTARTSYS TARGET_ERESTART /* Linux compat */ + +#endif /* ! _ERRNO_DEFS_H_ */ diff --git a/bsd-user/freebsd/meson.build b/bsd-user/freebsd/meson.build new file mode 100644 index 0000000000..4b69cca7b9 --- /dev/null +++ b/bsd-user/freebsd/meson.build @@ -0,0 +1,3 @@ +bsd_user_ss.add(files( + 'os-sys.c', +)) diff --git a/bsd-user/freebsd/os-sys.c b/bsd-user/freebsd/os-sys.c new file mode 100644 index 0000000000..309e27b9d6 --- /dev/null +++ b/bsd-user/freebsd/os-sys.c @@ -0,0 +1,27 @@ +/* + * FreeBSD sysctl() and sysarch() system call emulation + * + * Copyright (c) 2013-15 Stacey D. Son + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ + +#include "qemu.h" +#include "target_arch_sysarch.h" + +/* sysarch() is architecture dependent. 
*/ +abi_long do_freebsd_sysarch(void *cpu_env, abi_long arg1, abi_long arg2) +{ + return do_freebsd_arch_sysarch(cpu_env, arg1, arg2); +} diff --git a/bsd-user/freebsd/strace.list b/bsd-user/freebsd/strace.list index b01b5f36e8..275d2dbe27 100644 --- a/bsd-user/freebsd/strace.list +++ b/bsd-user/freebsd/strace.list @@ -33,10 +33,6 @@ { TARGET_FREEBSD_NR___syscall, "__syscall", NULL, NULL, NULL }, { TARGET_FREEBSD_NR___sysctl, "__sysctl", NULL, print_sysctl, NULL }, { TARGET_FREEBSD_NR__umtx_op, "_umtx_op", "%s(%#x, %d, %d, %#x, %#x)", NULL, NULL }, -#if defined(__FreeBSD_version) && __FreeBSD_version < 1000000 -{ TARGET_FREEBSD_NR__umtx_lock, "__umtx_lock", NULL, NULL, NULL }, -{ TARGET_FREEBSD_NR__umtx_unlock, "__umtx_unlock", NULL, NULL, NULL }, -#endif { TARGET_FREEBSD_NR_accept, "accept", "%s(%d,%#x,%#x)", NULL, NULL }, { TARGET_FREEBSD_NR_accept4, "accept4", "%s(%d,%d,%#x,%#x)", NULL, NULL }, { TARGET_FREEBSD_NR_access, "access", "%s(\"%s\",%#o)", NULL, NULL }, @@ -49,10 +45,6 @@ { TARGET_FREEBSD_NR_cap_fcntls_get, "cap_fcntls_get", NULL, NULL, NULL }, { TARGET_FREEBSD_NR_cap_fcntls_limit, "cap_fcntls_limit", NULL, NULL, NULL }, { TARGET_FREEBSD_NR_cap_getmode, "cap_getmode", NULL, NULL, NULL }, -#if defined(__FreeBSD_version) && __FreeBSD_version < 1000000 -{ TARGET_FREEBSD_NR_cap_getrights, "cap_getrights", NULL, NULL, NULL }, -{ TARGET_FREEBSD_NR_cap_new, "cap_new", NULL, NULL, NULL }, -#endif { TARGET_FREEBSD_NR_cap_ioctls_get, "cap_ioctls_get", NULL, NULL, NULL }, { TARGET_FREEBSD_NR_cap_ioctls_limit, "cap_ioctls_limit", NULL, NULL, NULL }, { TARGET_FREEBSD_NR_cap_rights_limit, "cap_rights_limit", NULL, NULL, NULL }, @@ -146,9 +138,6 @@ { TARGET_FREEBSD_NR_freebsd11_kevent, "freebsd11_kevent", NULL, NULL, NULL }, { TARGET_FREEBSD_NR_kevent, "kevent", NULL, NULL, NULL }, { TARGET_FREEBSD_NR_kill, "kill", NULL, NULL, NULL }, -#if defined(__FreeBSD_version) && __FreeBSD_version < 1000000 -{ TARGET_FREEBSD_NR_killpg, "killpg", NULL, NULL, NULL }, -#endif { TARGET_FREEBSD_NR_kqueue, "kqueue", NULL, NULL, NULL }, { TARGET_FREEBSD_NR_ktrace, "ktrace", NULL, NULL, NULL }, { TARGET_FREEBSD_NR_lchown, "lchown", NULL, NULL, NULL }, diff --git a/bsd-user/freebsd/target_os_elf.h b/bsd-user/freebsd/target_os_elf.h index 2d03a883aa..e5ac8e8e50 100644 --- a/bsd-user/freebsd/target_os_elf.h +++ b/bsd-user/freebsd/target_os_elf.h @@ -38,10 +38,6 @@ #define ELF_PLATFORM (NULL) #endif -#ifndef ELF_HWCAP -#define ELF_HWCAP 0 -#endif - /* XXX Look at the other conflicting AT_* values. 
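The target_os_elf.h hunk below starts publishing FREEBSD_AT_HWCAP (and, where ELF_HWCAP2 is defined, FREEBSD_AT_HWCAP2) in the guest's auxiliary vector. Purely to illustrate why that matters (guest-side code, not part of this patch), a FreeBSD guest program would typically consume these entries through libc's elf_aux_info(3), roughly:

    #include <sys/auxv.h>

    unsigned long hwcap = 0, hwcap2 = 0;
    elf_aux_info(AT_HWCAP, &hwcap, sizeof(hwcap));      /* returns 0 on success */
    elf_aux_info(AT_HWCAP2, &hwcap2, sizeof(hwcap2));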
*/ #define FREEBSD_AT_NCPUS 19 #define FREEBSD_AT_HWCAP 25 @@ -114,12 +110,16 @@ static abi_ulong target_create_elf_tables(abi_ulong p, int argc, int envc, NEW_AUX_ENT(AT_FLAGS, (abi_ulong)0); NEW_AUX_ENT(FREEBSD_AT_NCPUS, (abi_ulong)bsd_get_ncpu()); NEW_AUX_ENT(AT_ENTRY, load_bias + exec->e_entry); + features = ELF_HWCAP; + NEW_AUX_ENT(FREEBSD_AT_HWCAP, features); +#ifdef ELF_HWCAP2 + features = ELF_HWCAP2; + NEW_AUX_ENT(FREEBSD_AT_HWCAP2, features); +#endif NEW_AUX_ENT(AT_UID, (abi_ulong)getuid()); NEW_AUX_ENT(AT_EUID, (abi_ulong)geteuid()); NEW_AUX_ENT(AT_GID, (abi_ulong)getgid()); NEW_AUX_ENT(AT_EGID, (abi_ulong)getegid()); - features = ELF_HWCAP; - NEW_AUX_ENT(FREEBSD_AT_HWCAP, features); target_auxents = sp; /* Note where the aux entries are in the target */ #ifdef ARCH_DLINFO /* diff --git a/bsd-user/freebsd/target_os_signal.h b/bsd-user/freebsd/target_os_signal.h index 3ed454e086..1a4c5faf19 100644 --- a/bsd-user/freebsd/target_os_signal.h +++ b/bsd-user/freebsd/target_os_signal.h @@ -1,6 +1,9 @@ #ifndef _TARGET_OS_SIGNAL_H_ #define _TARGET_OS_SIGNAL_H_ +/* FreeBSD's sys/ucontext.h defines this */ +#define TARGET_MC_GET_CLEAR_RET 0x0001 + #include "target_os_siginfo.h" #include "target_arch_signal.h" diff --git a/bsd-user/freebsd/target_os_user.h b/bsd-user/freebsd/target_os_user.h index 95b1fa9f99..19892c5071 100644 --- a/bsd-user/freebsd/target_os_user.h +++ b/bsd-user/freebsd/target_os_user.h @@ -61,15 +61,7 @@ struct target_sockaddr_storage { /* * from sys/user.h */ -#if defined(__FreeBSD_version) && __FreeBSD_version >= 1200031 #define TARGET_KI_NSPARE_INT 2 -#elif defined(__FreeBSD_version) && __FreeBSD_version >= 1100000 -#define TARGET_KI_NSPARE_INT 4 -#elif defined(__FreeBSD_version) && __FreeBSD_version >= 1000000 -#define TARGET_KI_NSPARE_INT 7 -#else -#define TARGET_KI_NSPARE_INT 9 -#endif /* ! __FreeBSD_version >= 1000000 */ #define TARGET_KI_NSPARE_LONG 12 #define TARGET_KI_NSPARE_PTR 6 @@ -116,11 +108,7 @@ struct target_kinfo_proc { int32_t ki_tsid; /* Terminal session ID */ int16_t ki_jobc; /* job control counter */ int16_t ki_spare_short1; /* unused (just here for alignment) */ -#if defined(__FreeBSD_version) && __FreeBSD_version >= 1200031 int32_t ki_tdev__freebsd11; /* controlling tty dev */ -#else - int32_t ki_tdev; /* controlling tty dev */ -#endif target_sigset_t ki_siglist; /* Signals arrived but not delivered */ target_sigset_t ki_sigmask; /* Current signal mask */ target_sigset_t ki_sigignore; /* Signals being ignored */ @@ -164,45 +152,24 @@ struct target_kinfo_proc { int8_t ki_nice; /* Process "nice" value */ char ki_lock; /* Process lock (prevent swap) count */ char ki_rqindex; /* Run queue index */ -#if defined(__FreeBSD_version) && __FreeBSD_version >= 1100000 u_char ki_oncpu_old; /* Which cpu we are on (legacy) */ u_char ki_lastcpu_old; /* Last cpu we were on (legacy) */ -#else - u_char ki_oncpu; /* Which cpu we are on */ - u_char ki_lastcpu; /* Last cpu we were on */ -#endif /* ! __FreeBSD_version >= 1100000 */ -#if defined(__FreeBSD_version) && __FreeBSD_version >= 900000 char ki_tdname[TARGET_TDNAMLEN + 1]; /* thread name */ -#else - char ki_ocomm[TARGET_TDNAMLEN + 1]; /* thread name */ -#endif /* ! 
__FreeBSD_version >= 900000 */ char ki_wmesg[TARGET_WMESGLEN + 1]; /* wchan message */ char ki_login[TARGET_LOGNAMELEN + 1]; /* setlogin name */ char ki_lockname[TARGET_LOCKNAMELEN + 1]; /* lock name */ char ki_comm[TARGET_COMMLEN + 1]; /* command name */ char ki_emul[TARGET_KI_EMULNAMELEN + 1]; /* emulation name */ -#if defined(__FreeBSD_version) && __FreeBSD_version >= 900000 char ki_loginclass[TARGET_LOGINCLASSLEN + 1]; /* login class */ -#endif /* ! __FreeBSD_version >= 900000 */ -#if defined(__FreeBSD_version) && __FreeBSD_version >= 900000 char ki_sparestrings[50]; /* spare string space */ -#else - char ki_sparestrings[68]; /* spare string space */ -#endif /* ! __FreeBSD_version >= 900000 */ int32_t ki_spareints[TARGET_KI_NSPARE_INT]; /* spare room for growth */ -#if defined(__FreeBSD_version) && __FreeBSD_version >= 1200031 - uint64_t ki_tdev; /* controlling tty dev */ -#endif -#if defined(__FreeBSD_version) && __FreeBSD_version >= 1100000 + uint64_t ki_tdev; /* controlling tty dev */ int32_t ki_oncpu; /* Which cpu we are on */ int32_t ki_lastcpu; /* Last cpu we were on */ int32_t ki_tracer; /* Pid of tracing process */ -#endif /* __FreeBSD_version >= 1100000 */ -#if defined(__FreeBSD_version) && __FreeBSD_version >= 900000 int32_t ki_flag2; /* P2_* flags */ int32_t ki_fibnum; /* Default FIB number */ -#endif /* ! __FreeBSD_version >= 900000 */ uint32_t ki_cr_flags; /* Credential flags */ int32_t ki_jid; /* Process jail ID */ int32_t ki_numthreads; /* XXXKSE number of threads in total */ @@ -234,18 +201,8 @@ struct target_kinfo_file { int32_t kf_flags; /* Flags. */ int32_t kf_pad0; /* Round to 64 bit alignment. */ int64_t kf_offset; /* Seek location. */ -#if defined(__FreeBSD_version) && __FreeBSD_version < 1200031 - int32_t kf_vnode_type; /* Vnode type. */ - int32_t kf_sock_domain; /* Socket domain. */ - int32_t kf_sock_type; /* Socket type. */ - int32_t kf_sock_protocol; /* Socket protocol. */ - struct target_sockaddr_storage kf_sa_local; /* Socket address. */ - struct target_sockaddr_storage kf_sa_peer; /* Peer address. */ -#endif -#if defined(__FreeBSD_version) && __FreeBSD_version >= 900000 union { struct { -#if defined(__FreeBSD_version) && __FreeBSD_version >= 1200031 uint32_t kf_spareint; /* Socket domain. */ int kf_sock_domain0; @@ -257,7 +214,6 @@ struct target_kinfo_file { struct sockaddr_storage kf_sa_local; /* Peer address. */ struct sockaddr_storage kf_sa_peer; -#endif /* Address of so_pcb. */ uint64_t kf_sock_pcb; /* Address of inp_ppcb. */ @@ -272,7 +228,6 @@ struct target_kinfo_file { uint32_t kf_sock_pad0; } kf_sock; struct { -#if defined(__FreeBSD_version) && __FreeBSD_version >= 1200031 /* Vnode type. */ int kf_file_type; /* Space for future use */ @@ -290,16 +245,6 @@ struct target_kinfo_file { uint32_t kf_file_fsid_freebsd11; /* File device, FreeBSD 11 compat. */ uint32_t kf_file_rdev_freebsd11; -#else - /* Global file id. */ - uint64_t kf_file_fileid; - /* File size. */ - uint64_t kf_file_size; - /* Vnode filesystem id. */ - uint32_t kf_file_fsid; - /* File device. */ - uint32_t kf_file_rdev; -#endif /* File mode. */ uint16_t kf_file_mode; /* Round to 64 bit alignment. 
*/ @@ -307,18 +252,14 @@ struct target_kinfo_file { uint32_t kf_file_pad1; } kf_file; struct { -#if defined(__FreeBSD_version) && __FreeBSD_version >= 1200031 uint32_t kf_spareint[4]; uint64_t kf_spareint64[32]; -#endif uint32_t kf_sem_value; uint16_t kf_sem_mode; } kf_sem; struct { -#if defined(__FreeBSD_version) && __FreeBSD_version >= 1200031 uint32_t kf_spareint[4]; uint64_t kf_spareint64[32]; -#endif uint64_t kf_pipe_addr; uint64_t kf_pipe_peer; uint32_t kf_pipe_buffer_cnt; @@ -326,7 +267,6 @@ struct target_kinfo_file { uint32_t kf_pipe_pad0[3]; } kf_pipe; struct { -#if defined(__FreeBSD_version) && __FreeBSD_version >= 1200031 uint32_t kf_spareint[4]; uint64_t kf_spareint64[32]; uint32_t kf_pts_dev_freebsd11; @@ -334,34 +274,18 @@ struct target_kinfo_file { uint64_t kf_pts_dev; /* Round to 64 bit alignment. */ uint32_t kf_pts_pad1[4]; -#else - uint32_t kf_pts_dev; - /* Round to 64 bit alignment. */ - uint32_t kf_pts_pad0[7]; -#endif } kf_pts; struct { -#if defined(__FreeBSD_version) && __FreeBSD_version >= 1200031 uint32_t kf_spareint[4]; uint64_t kf_spareint64[32]; -#endif int32_t kf_pid; } kf_proc; } kf_un; uint16_t kf_status; /* Status flags. */ uint16_t kf_pad1; /* Round to 32 bit alignment. */ int32_t _kf_ispare0; /* Space for more stuff. */ -#if defined(__FreeBSD_version) && __FreeBSD_version >= 1000000 target_cap_rights_t kf_cap_rights; /* Capability rights. */ uint64_t _kf_cap_spare; /* Space for future cap_rights_t. */ -#else /* ! __FreeBSD_version >= 1000000 */ - uint64_t kf_cap_rights; - int _kf_ispare[4]; -#endif /* ! __FreeBSD_version >= 1000000 */ - -#else /* ! __FreeBSD_version >= 900000 */ - int _kf_ispare[16]; -#endif /* ! __FreeBSD_version >= 900000 */ /* Truncated before copyout in sysctl */ char kf_path[PATH_MAX]; /* Path to file, if any. */ }; @@ -372,34 +296,19 @@ struct target_kinfo_vmentry { uint64_t kve_start; /* Starting address. */ uint64_t kve_end; /* Finishing address. */ uint64_t kve_offset; /* Mapping offset in object */ -#if defined(__FreeBSD_version) && __FreeBSD_version >= 900000 uint64_t kve_vn_fileid; /* inode number if vnode */ -#if defined(__FreeBSD_version) && __FreeBSD_version >= 1200031 uint32_t kve_vn_fsid_freebsd11; /* dev_t of vnode location */ -#else - uint32_t kve_vn_fsid; /* dev_t of vnode location */ -#endif -#else /* ! __FreeBSD_version >= 900000 */ - uint64_t kve_fileid; /* inode number if vnode */ - uint32_t kve_fsid; /* dev_t of vnode location */ -#endif /* ! __FreeBSD_version >= 900000 */ int32_t kve_flags; /* Flags on map entry. */ int32_t kve_resident; /* Number of resident pages. */ int32_t kve_private_resident; /* Number of private pages. */ int32_t kve_protection; /* Protection bitmask. */ int32_t kve_ref_count; /* VM obj ref count. */ int32_t kve_shadow_count; /* VM obj shadow count. */ -#if defined(__FreeBSD_version) && __FreeBSD_version >= 900000 int32_t kve_vn_type; /* Vnode type. */ uint64_t kve_vn_size; /* File size. */ -#if defined(__FreeBSD_version) && __FreeBSD_version >= 1200031 uint32_t kve_vn_rdev_freebsd11; /* Device id if device. */ -#else - uint32_t kve_vn_rdev; /* Device id if device. */ -#endif uint16_t kve_vn_mode; /* File mode. */ uint16_t kve_status; /* Status flags. */ -#if defined(__FreeBSD_version) && __FreeBSD_version >= 1200031 #if (__FreeBSD_version >= 1300501 && __FreeBSD_version < 1400000) || \ __FreeBSD_version >= 1400009 union { @@ -413,13 +322,6 @@ struct target_kinfo_vmentry { #endif uint64_t kve_vn_rdev; /* Device id if device. */ int _kve_ispare[8]; /* Space for more stuff. 
*/ -#else - int32_t _kve_ispare[12]; /* Space for more stuff. */ -#endif -#else /* ! __FreeBSD_version >= 900000 */ - int _kve_pad0; - int32_t _kve_ispare[16]; /* Space for more stuff. */ -#endif /* ! __FreeBSD_version >= 900000 */ /* Truncated before copyout in sysctl */ char kve_path[PATH_MAX]; /* Path to VM obj, if any. */ }; diff --git a/bsd-user/i386/target_arch_cpu.h b/bsd-user/i386/target_arch_cpu.h index 978e8066af..b28602adbb 100644 --- a/bsd-user/i386/target_arch_cpu.h +++ b/bsd-user/i386/target_arch_cpu.h @@ -23,8 +23,6 @@ #define TARGET_DEFAULT_CPU_MODEL "qemu32" -#define TARGET_CPU_RESET(cpu) - static inline void target_cpu_init(CPUX86State *env, struct target_pt_regs *regs) { diff --git a/bsd-user/i386/target_arch_signal.h b/bsd-user/i386/target_arch_signal.h index 9812c6b034..a90750d602 100644 --- a/bsd-user/i386/target_arch_signal.h +++ b/bsd-user/i386/target_arch_signal.h @@ -27,8 +27,6 @@ #define TARGET_MINSIGSTKSZ (512 * 4) /* min sig stack size */ #define TARGET_SIGSTKSZ (MINSIGSTKSZ + 32768) /* recommended size */ -#define TARGET_MC_GET_CLEAR_RET 0x0001 - struct target_sigcontext { /* to be added */ }; diff --git a/bsd-user/main.c b/bsd-user/main.c index 48643eeabc..cb5ea40236 100644 --- a/bsd-user/main.c +++ b/bsd-user/main.c @@ -195,6 +195,15 @@ static void usage(void) __thread CPUState *thread_cpu; +void stop_all_tasks(void) +{ + /* + * We trust when using NPTL (pthreads) start_exclusive() handles thread + * stopping correctly. + */ + start_exclusive(); +} + bool qemu_cpu_is_self(CPUState *cpu) { return thread_cpu == cpu; @@ -210,7 +219,6 @@ void init_task_state(TaskState *ts) { int i; - ts->used = 1; ts->first_free = ts->sigqueue_table; for (i = 0; i < MAX_SIGQUEUE_SIZE - 1; i++) { ts->sigqueue_table[i].next = &ts->sigqueue_table[i + 1]; diff --git a/bsd-user/meson.build b/bsd-user/meson.build index 0369549340..87885d91ed 100644 --- a/bsd-user/meson.build +++ b/bsd-user/meson.build @@ -1,3 +1,7 @@ +if not have_bsd_user + subdir_done() +endif + bsd_user_ss.add(files( 'bsdload.c', 'elfload.c', @@ -8,3 +12,6 @@ bsd_user_ss.add(files( 'syscall.c', 'uaccess.c', )) + +# Pull in the OS-specific build glue, if any +subdir(targetos) diff --git a/bsd-user/mmap.c b/bsd-user/mmap.c index b40ab9045f..13cb32dba1 100644 --- a/bsd-user/mmap.c +++ b/bsd-user/mmap.c @@ -21,8 +21,6 @@ #include "qemu.h" #include "qemu-common.h" -//#define DEBUG_MMAP - static pthread_mutex_t mmap_mutex = PTHREAD_MUTEX_INITIALIZER; static __thread int mmap_lock_count; @@ -67,14 +65,11 @@ int target_mprotect(abi_ulong start, abi_ulong len, int prot) abi_ulong end, host_start, host_end, addr; int prot1, ret; -#ifdef DEBUG_MMAP - printf("mprotect: start=0x" TARGET_ABI_FMT_lx - "len=0x" TARGET_ABI_FMT_lx " prot=%c%c%c\n", start, len, - prot & PROT_READ ? 'r' : '-', - prot & PROT_WRITE ? 'w' : '-', - prot & PROT_EXEC ? 'x' : '-'); -#endif - + qemu_log_mask(CPU_LOG_PAGE, "mprotect: start=0x" TARGET_ABI_FMT_lx + " len=0x" TARGET_ABI_FMT_lx " prot=%c%c%c\n", start, len, + prot & PROT_READ ? 'r' : '-', + prot & PROT_WRITE ? 'w' : '-', + prot & PROT_EXEC ? 'x' : '-'); if ((start & ~TARGET_PAGE_MASK) != 0) return -EINVAL; len = TARGET_PAGE_ALIGN(len); @@ -132,7 +127,27 @@ error: return ret; } -/* map an incomplete host page */ +/* + * map an incomplete host page + * + * mmap_frag can be called with a valid fd, if flags doesn't contain one of + * MAP_ANON, MAP_STACK, MAP_GUARD. If we need to map a page in those cases, we + * pass fd == -1. However, if flags contains MAP_GUARD then MAP_ANON cannot be + * added. 
+ * + * * If fd is valid (not -1) we want to map the pages with MAP_ANON. + * * If flags contains MAP_GUARD we don't want to add MAP_ANON because it + * will be rejected. See kern_mmap's enforcing of constraints for MAP_GUARD + * in sys/vm/vm_mmap.c. + * * If flags contains MAP_ANON it doesn't matter if we add it or not. + * * If flags contains MAP_STACK, mmap adds MAP_ANON when called so doesn't + * matter if we add it or not either. See enforcing of constraints for + * MAP_STACK in kern_mmap. + * + * Don't add MAP_ANON for the flags that use fd == -1 without specifying the + * flags directly, with the assumption that future flags that require fd == -1 + * will also not require MAP_ANON. + */ static int mmap_frag(abi_ulong real_start, abi_ulong start, abi_ulong end, int prot, int flags, int fd, abi_ulong offset) @@ -152,9 +167,9 @@ static int mmap_frag(abi_ulong real_start, } if (prot1 == 0) { - /* no page was there, so we allocate one */ + /* no page was there, so we allocate one. See also above. */ void *p = mmap(host_start, qemu_host_page_size, prot, - flags | MAP_ANON, -1, 0); + flags | ((fd != -1) ? MAP_ANON : 0), -1, 0); if (p == MAP_FAILED) return -1; prot1 = prot; @@ -162,7 +177,7 @@ static int mmap_frag(abi_ulong real_start, prot1 &= PAGE_BITS; prot_new = prot | prot1; - if (!(flags & MAP_ANON)) { + if (fd != -1) { /* msync() won't work here, so we return an error if write is possible while it is a shared mapping */ if ((flags & TARGET_BSD_MAP_FLAGMASK) == MAP_SHARED && @@ -174,16 +189,20 @@ static int mmap_frag(abi_ulong real_start, mprotect(host_start, qemu_host_page_size, prot1 | PROT_WRITE); /* read the corresponding file data */ - pread(fd, g2h_untagged(start), end - start, offset); + if (pread(fd, g2h_untagged(start), end - start, offset) == -1) { + return -1; + } /* put final protection */ if (prot_new != (prot1 | PROT_WRITE)) mprotect(host_start, qemu_host_page_size, prot_new); } else { - /* just update the protection */ if (prot_new != prot1) { mprotect(host_start, qemu_host_page_size, prot_new); } + if (prot_new & PROT_WRITE) { + memset(g2h_untagged(start), 0, end - start); + } } return 0; } @@ -281,14 +300,10 @@ static abi_ulong mmap_find_vma_aligned(abi_ulong start, abi_ulong size, addr = start; wrapped = repeat = 0; prev = 0; - flags = MAP_ANONYMOUS | MAP_PRIVATE; -#ifdef MAP_ALIGNED + flags = MAP_ANON | MAP_PRIVATE; if (alignment != 0) { flags |= MAP_ALIGNED(alignment); } -#else - /* XXX TODO */ -#endif for (;; prev = ptr) { /* @@ -391,57 +406,48 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int prot, abi_ulong ret, end, real_start, real_end, retaddr, host_offset, host_len; mmap_lock(); -#ifdef DEBUG_MMAP - { - printf("mmap: start=0x" TARGET_ABI_FMT_lx - " len=0x" TARGET_ABI_FMT_lx " prot=%c%c%c flags=", - start, len, - prot & PROT_READ ? 'r' : '-', - prot & PROT_WRITE ? 'w' : '-', - prot & PROT_EXEC ? 'x' : '-'); + if (qemu_loglevel_mask(CPU_LOG_PAGE)) { + qemu_log("mmap: start=0x" TARGET_ABI_FMT_lx + " len=0x" TARGET_ABI_FMT_lx " prot=%c%c%c flags=", + start, len, + prot & PROT_READ ? 'r' : '-', + prot & PROT_WRITE ? 'w' : '-', + prot & PROT_EXEC ? 
'x' : '-'); if (flags & MAP_ALIGNMENT_MASK) { - printf("MAP_ALIGNED(%u) ", (flags & MAP_ALIGNMENT_MASK) - >> MAP_ALIGNMENT_SHIFT); + qemu_log("MAP_ALIGNED(%u) ", + (flags & MAP_ALIGNMENT_MASK) >> MAP_ALIGNMENT_SHIFT); } -#if MAP_GUARD if (flags & MAP_GUARD) { - printf("MAP_GUARD "); + qemu_log("MAP_GUARD "); } -#endif if (flags & MAP_FIXED) { - printf("MAP_FIXED "); + qemu_log("MAP_FIXED "); } - if (flags & MAP_ANONYMOUS) { - printf("MAP_ANON "); + if (flags & MAP_ANON) { + qemu_log("MAP_ANON "); } -#ifdef MAP_EXCL if (flags & MAP_EXCL) { - printf("MAP_EXCL "); + qemu_log("MAP_EXCL "); } -#endif if (flags & MAP_PRIVATE) { - printf("MAP_PRIVATE "); + qemu_log("MAP_PRIVATE "); } if (flags & MAP_SHARED) { - printf("MAP_SHARED "); + qemu_log("MAP_SHARED "); } if (flags & MAP_NOCORE) { - printf("MAP_NOCORE "); + qemu_log("MAP_NOCORE "); } -#ifdef MAP_STACK if (flags & MAP_STACK) { - printf("MAP_STACK "); + qemu_log("MAP_STACK "); } -#endif - printf("fd=%d offset=0x%llx\n", fd, offset); + qemu_log("fd=%d offset=0x%lx\n", fd, offset); } -#endif - if ((flags & MAP_ANONYMOUS) && fd != -1) { + if ((flags & MAP_ANON) && fd != -1) { errno = EINVAL; goto fail; } -#ifdef MAP_STACK if (flags & MAP_STACK) { if ((fd != -1) || ((prot & (PROT_READ | PROT_WRITE)) != (PROT_READ | PROT_WRITE))) { @@ -449,8 +455,6 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int prot, goto fail; } } -#endif /* MAP_STACK */ -#ifdef MAP_GUARD if ((flags & MAP_GUARD) && (prot != PROT_NONE || fd != -1 || offset != 0 || (flags & (MAP_SHARED | MAP_PRIVATE | /* MAP_PREFAULT | */ /* MAP_PREFAULT not in mman.h */ @@ -458,18 +462,24 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int prot, errno = EINVAL; goto fail; } -#endif if (offset & ~TARGET_PAGE_MASK) { errno = EINVAL; goto fail; } - len = TARGET_PAGE_ALIGN(len); if (len == 0) { errno = EINVAL; goto fail; } + + /* Check for overflows */ + len = TARGET_PAGE_ALIGN(len); + if (len == 0) { + errno = ENOMEM; + goto fail; + } + real_start = start & qemu_host_page_mask; host_offset = offset & qemu_host_page_mask; @@ -536,7 +546,7 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int prot, * qemu_real_host_page_size */ p = mmap(g2h_untagged(start), host_len, prot, - flags | MAP_FIXED | ((fd != -1) ? MAP_ANONYMOUS : 0), -1, 0); + flags | MAP_FIXED | ((fd != -1) ? MAP_ANON : 0), -1, 0); if (p == MAP_FAILED) goto fail; /* update start so that it points to the file position at 'offset' */ @@ -564,18 +574,16 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int prot, * It can fail only on 64-bit host with 32-bit target. * On any other target/host host mmap() handles this error correctly. 
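The hunk just below replaces the hand-rolled 32-bit-guest overflow test with guest_range_valid_untagged(). A worked example of the case being guarded against (my illustration, assuming a 32-bit guest with a 4 GiB address space):

    start = 0xfffff000, len = 0x2000
    start + len - 1 = 0x1_0000_0fff   /* past the 4 GiB guest limit, so the request must fail with EINVAL */

On a 64-bit host the raw mmap() would happily satisfy such a range, which is why the emulator has to reject it explicitly before calling into the host.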
*/ -#if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64 - if ((unsigned long)start + len - 1 > (abi_ulong) -1) { + if (!guest_range_valid_untagged(start, len)) { errno = EINVAL; goto fail; } -#endif /* * worst case: we cannot map the file because the offset is not * aligned, so we read it */ - if (!(flags & MAP_ANON) && + if (fd != -1 && (offset & ~qemu_host_page_mask) != (start & ~qemu_host_page_mask)) { /* * msync() won't work here, so we return an error if write is @@ -591,17 +599,22 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int prot, -1, 0); if (retaddr == -1) goto fail; - pread(fd, g2h_untagged(start), len, offset); + if (pread(fd, g2h_untagged(start), len, offset) == -1) { + goto fail; + } if (!(prot & PROT_WRITE)) { ret = target_mprotect(start, len, prot); - if (ret != 0) { - start = ret; - goto the_end; - } + assert(ret == 0); } goto the_end; } + /* Reject the mapping if any page within the range is mapped */ + if ((flags & MAP_EXCL) && page_check_range(start, len, 0) < 0) { + errno = EINVAL; + goto fail; + } + /* handle the start of the mapping */ if (start > real_start) { if (real_end == real_start + qemu_host_page_size) { @@ -697,8 +710,7 @@ static void mmap_reserve(abi_ulong start, abi_ulong size) } if (real_start != real_end) { mmap(g2h_untagged(real_start), real_end - real_start, PROT_NONE, - MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE, - -1, 0); + MAP_FIXED | MAP_ANON | MAP_PRIVATE, -1, 0); } } diff --git a/bsd-user/qemu.h b/bsd-user/qemu.h index 522d6c4031..1b3b974afe 100644 --- a/bsd-user/qemu.h +++ b/bsd-user/qemu.h @@ -17,7 +17,6 @@ #ifndef QEMU_H #define QEMU_H - #include "qemu/osdep.h" #include "cpu.h" #include "qemu/units.h" @@ -73,15 +72,15 @@ struct image_info { #define MAX_SIGQUEUE_SIZE 1024 -struct sigqueue { - struct sigqueue *next; +struct qemu_sigqueue { + struct qemu_sigqueue *next; + target_siginfo_t info; }; struct emulated_sigtable { int pending; /* true if signal is pending */ - struct sigqueue *first; - /* in order to always have memory for the first signal, we put it here */ - struct sigqueue info; + struct qemu_sigqueue *first; + struct qemu_sigqueue info; /* Put first signal info here */ }; /* @@ -92,18 +91,18 @@ typedef struct TaskState { struct TaskState *next; struct bsd_binprm *bprm; - int used; /* non zero if used */ struct image_info *info; struct emulated_sigtable sigtab[TARGET_NSIG]; - struct sigqueue sigqueue_table[MAX_SIGQUEUE_SIZE]; /* siginfo queue */ - struct sigqueue *first_free; /* first free siginfo queue entry */ + struct qemu_sigqueue sigqueue_table[MAX_SIGQUEUE_SIZE]; /* siginfo queue */ + struct qemu_sigqueue *first_free; /* first free siginfo queue entry */ int signal_pending; /* non zero if a signal may be pending */ uint8_t stack[]; } __attribute__((aligned(16))) TaskState; void init_task_state(TaskState *ts); +void stop_all_tasks(void); extern const char *qemu_uname_release; /* @@ -209,6 +208,7 @@ void process_pending_signals(CPUArchState *cpu_env); void signal_init(void); long do_sigreturn(CPUArchState *env); long do_rt_sigreturn(CPUArchState *env); +void queue_signal(CPUArchState *env, int sig, target_siginfo_t *info); abi_long do_sigaltstack(abi_ulong uss_addr, abi_ulong uoss_addr, abi_ulong sp); /* mmap.c */ @@ -235,6 +235,13 @@ extern unsigned long target_dflssiz; extern unsigned long target_maxssiz; extern unsigned long target_sgrowsiz; +/* syscall.c */ +abi_long get_errno(abi_long ret); +bool is_error(abi_long ret); + +/* os-sys.c */ +abi_long do_freebsd_sysarch(void *cpu_env, abi_long arg1, abi_long arg2); + /* 
user access */ #define VERIFY_READ PAGE_READ diff --git a/bsd-user/signal.c b/bsd-user/signal.c index ad6d935569..05b277c642 100644 --- a/bsd-user/signal.c +++ b/bsd-user/signal.c @@ -16,10 +16,24 @@ * You should have received a copy of the GNU General Public License * along with this program; if not, see . */ -#include "qemu/osdep.h" +#include "qemu/osdep.h" #include "qemu.h" +/* + * Stubbed out routines until we merge signal support from bsd-user + * fork. + */ + +/* + * Queue a signal so that it will be send to the virtual CPU as soon as + * possible. + */ +void queue_signal(CPUArchState *env, int sig, target_siginfo_t *info) +{ + qemu_log_mask(LOG_UNIMP, "No signal queueing, dropping signal %d\n", sig); +} + void signal_init(void) { } @@ -27,3 +41,19 @@ void signal_init(void) void process_pending_signals(CPUArchState *cpu_env) { } + +void cpu_loop_exit_sigsegv(CPUState *cpu, target_ulong addr, + MMUAccessType access_type, bool maperr, uintptr_t ra) +{ + qemu_log_mask(LOG_UNIMP, "No signal support for SIGSEGV\n"); + /* unreachable */ + abort(); +} + +void cpu_loop_exit_sigbus(CPUState *cpu, target_ulong addr, + MMUAccessType access_type, uintptr_t ra) +{ + qemu_log_mask(LOG_UNIMP, "No signal support for SIGBUS\n"); + /* unreachable */ + abort(); +} diff --git a/bsd-user/syscall.c b/bsd-user/syscall.c index 372836d44d..d3322760f4 100644 --- a/bsd-user/syscall.c +++ b/bsd-user/syscall.c @@ -33,18 +33,18 @@ static abi_ulong target_brk; static abi_ulong target_original_brk; -static inline abi_long get_errno(abi_long ret) +abi_long get_errno(abi_long ret) { - if (ret == -1) + if (ret == -1) { /* XXX need to translate host -> target errnos here */ return -(errno); - else - return ret; + } + return ret; } #define target_to_host_bitmask(x, tbl) (x) -static inline int is_error(abi_long ret) +bool is_error(abi_long ret) { return (abi_ulong)ret >= (abi_ulong)(-4096); } @@ -88,56 +88,6 @@ static abi_long do_obreak(abi_ulong new_brk) return 0; } -#if defined(TARGET_I386) -static abi_long do_freebsd_sysarch(CPUX86State *env, int op, abi_ulong parms) -{ - abi_long ret = 0; - abi_ulong val; - int idx; - - switch (op) { -#ifdef TARGET_ABI32 - case TARGET_FREEBSD_I386_SET_GSBASE: - case TARGET_FREEBSD_I386_SET_FSBASE: - if (op == TARGET_FREEBSD_I386_SET_GSBASE) -#else - case TARGET_FREEBSD_AMD64_SET_GSBASE: - case TARGET_FREEBSD_AMD64_SET_FSBASE: - if (op == TARGET_FREEBSD_AMD64_SET_GSBASE) -#endif - idx = R_GS; - else - idx = R_FS; - if (get_user(val, parms, abi_ulong)) - return -TARGET_EFAULT; - cpu_x86_load_seg(env, idx, 0); - env->segs[idx].base = val; - break; -#ifdef TARGET_ABI32 - case TARGET_FREEBSD_I386_GET_GSBASE: - case TARGET_FREEBSD_I386_GET_FSBASE: - if (op == TARGET_FREEBSD_I386_GET_GSBASE) -#else - case TARGET_FREEBSD_AMD64_GET_GSBASE: - case TARGET_FREEBSD_AMD64_GET_FSBASE: - if (op == TARGET_FREEBSD_AMD64_GET_GSBASE) -#endif - idx = R_GS; - else - idx = R_FS; - val = env->segs[idx].base; - if (put_user(val, parms, abi_ulong)) - return -TARGET_EFAULT; - break; - /* XXX handle the others... 
*/ - default: - ret = -TARGET_EINVAL; - break; - } - return ret; -} -#endif - #ifdef __FreeBSD__ /* * XXX this uses the undocumented oidfmt interface to find the kind of diff --git a/bsd-user/x86_64/target_arch_cpu.h b/bsd-user/x86_64/target_arch_cpu.h index 5f5ee602f9..5172b230f0 100644 --- a/bsd-user/x86_64/target_arch_cpu.h +++ b/bsd-user/x86_64/target_arch_cpu.h @@ -23,8 +23,6 @@ #define TARGET_DEFAULT_CPU_MODEL "qemu64" -#define TARGET_CPU_RESET(cpu) - static inline void target_cpu_init(CPUX86State *env, struct target_pt_regs *regs) { diff --git a/bsd-user/x86_64/target_arch_signal.h b/bsd-user/x86_64/target_arch_signal.h index 4c1ff0e5ba..4bb753b08b 100644 --- a/bsd-user/x86_64/target_arch_signal.h +++ b/bsd-user/x86_64/target_arch_signal.h @@ -27,8 +27,6 @@ #define TARGET_MINSIGSTKSZ (512 * 4) /* min sig stack size */ #define TARGET_SIGSTKSZ (MINSIGSTKSZ + 32768) /* recommended size */ -#define TARGET_MC_GET_CLEAR_RET 0x0001 - struct target_sigcontext { /* to be added */ }; diff --git a/chardev/char-mux.c b/chardev/char-mux.c index ada0c6866f..ee2d47b20d 100644 --- a/chardev/char-mux.c +++ b/chardev/char-mux.c @@ -28,6 +28,7 @@ #include "qemu/option.h" #include "chardev/char.h" #include "sysemu/block-backend.h" +#include "qapi/qapi-commands-control.h" #include "chardev-internal.h" /* MUX driver for serial I/O splitting */ @@ -157,7 +158,7 @@ static int mux_proc_byte(Chardev *chr, MuxChardev *d, int ch) { const char *term = "QEMU: Terminated\n\r"; qemu_chr_write_all(chr, (uint8_t *)term, strlen(term)); - exit(0); + qmp_quit(NULL); break; } case 's': diff --git a/chardev/meson.build b/chardev/meson.build index 32377af383..325ba2bdb9 100644 --- a/chardev/meson.build +++ b/chardev/meson.build @@ -35,7 +35,7 @@ if brlapi.found() chardev_modules += { 'baum': module_ss } endif -if config_host.has_key('CONFIG_SPICE') +if spice.found() module_ss = ss.source_set() module_ss.add(when: [spice], if_true: files('spice.c')) chardev_modules += { 'spice': module_ss } diff --git a/configs/devices/i386-softmmu/default.mak b/configs/devices/i386-softmmu/default.mak index 84d1a2487c..598c6646df 100644 --- a/configs/devices/i386-softmmu/default.mak +++ b/configs/devices/i386-softmmu/default.mak @@ -22,6 +22,7 @@ #CONFIG_TPM_CRB=n #CONFIG_TPM_TIS_ISA=n #CONFIG_VTD=n +#CONFIG_SGX=n # Boards: # diff --git a/configs/targets/aarch64-linux-user.mak b/configs/targets/aarch64-linux-user.mak index 4713253709..d0c603c54e 100644 --- a/configs/targets/aarch64-linux-user.mak +++ b/configs/targets/aarch64-linux-user.mak @@ -1,5 +1,5 @@ TARGET_ARCH=aarch64 TARGET_BASE_ARCH=arm -TARGET_XML_FILES= gdb-xml/aarch64-core.xml gdb-xml/aarch64-fpu.xml gdb-xml/arm-core.xml gdb-xml/arm-vfp.xml gdb-xml/arm-vfp3.xml gdb-xml/arm-neon.xml gdb-xml/arm-m-profile.xml +TARGET_XML_FILES= gdb-xml/aarch64-core.xml gdb-xml/aarch64-fpu.xml TARGET_HAS_BFLT=y CONFIG_ARM_COMPATIBLE_SEMIHOSTING=y diff --git a/configs/targets/aarch64-softmmu.mak b/configs/targets/aarch64-softmmu.mak index 7703127674..d489e6da83 100644 --- a/configs/targets/aarch64-softmmu.mak +++ b/configs/targets/aarch64-softmmu.mak @@ -1,5 +1,5 @@ TARGET_ARCH=aarch64 TARGET_BASE_ARCH=arm TARGET_SUPPORTS_MTTCG=y -TARGET_XML_FILES= gdb-xml/aarch64-core.xml gdb-xml/aarch64-fpu.xml gdb-xml/arm-core.xml gdb-xml/arm-vfp.xml gdb-xml/arm-vfp3.xml gdb-xml/arm-neon.xml gdb-xml/arm-m-profile.xml +TARGET_XML_FILES= gdb-xml/aarch64-core.xml gdb-xml/aarch64-fpu.xml gdb-xml/arm-core.xml gdb-xml/arm-vfp.xml gdb-xml/arm-vfp3.xml gdb-xml/arm-vfp-sysregs.xml gdb-xml/arm-neon.xml 
gdb-xml/arm-m-profile.xml gdb-xml/arm-m-profile-mve.xml TARGET_NEED_FDT=y diff --git a/configs/targets/aarch64_be-linux-user.mak b/configs/targets/aarch64_be-linux-user.mak index fae831558d..d3ee10c00f 100644 --- a/configs/targets/aarch64_be-linux-user.mak +++ b/configs/targets/aarch64_be-linux-user.mak @@ -1,6 +1,6 @@ TARGET_ARCH=aarch64 TARGET_BASE_ARCH=arm TARGET_WORDS_BIGENDIAN=y -TARGET_XML_FILES= gdb-xml/aarch64-core.xml gdb-xml/aarch64-fpu.xml gdb-xml/arm-core.xml gdb-xml/arm-vfp.xml gdb-xml/arm-vfp3.xml gdb-xml/arm-neon.xml gdb-xml/arm-m-profile.xml +TARGET_XML_FILES= gdb-xml/aarch64-core.xml gdb-xml/aarch64-fpu.xml TARGET_HAS_BFLT=y CONFIG_ARM_COMPATIBLE_SEMIHOSTING=y diff --git a/configs/targets/arm-linux-user.mak b/configs/targets/arm-linux-user.mak index e741ffd4d3..3e10d6b15d 100644 --- a/configs/targets/arm-linux-user.mak +++ b/configs/targets/arm-linux-user.mak @@ -1,6 +1,6 @@ TARGET_ARCH=arm TARGET_SYSTBL_ABI=common,oabi TARGET_SYSTBL=syscall.tbl -TARGET_XML_FILES= gdb-xml/arm-core.xml gdb-xml/arm-vfp.xml gdb-xml/arm-vfp3.xml gdb-xml/arm-neon.xml gdb-xml/arm-m-profile.xml +TARGET_XML_FILES= gdb-xml/arm-core.xml gdb-xml/arm-vfp.xml gdb-xml/arm-vfp3.xml gdb-xml/arm-vfp-sysregs.xml gdb-xml/arm-neon.xml gdb-xml/arm-m-profile.xml gdb-xml/arm-m-profile-mve.xml TARGET_HAS_BFLT=y CONFIG_ARM_COMPATIBLE_SEMIHOSTING=y diff --git a/configs/targets/arm-softmmu.mak b/configs/targets/arm-softmmu.mak index 84a98f4818..92c8349b96 100644 --- a/configs/targets/arm-softmmu.mak +++ b/configs/targets/arm-softmmu.mak @@ -1,4 +1,4 @@ TARGET_ARCH=arm TARGET_SUPPORTS_MTTCG=y -TARGET_XML_FILES= gdb-xml/arm-core.xml gdb-xml/arm-vfp.xml gdb-xml/arm-vfp3.xml gdb-xml/arm-neon.xml gdb-xml/arm-m-profile.xml +TARGET_XML_FILES= gdb-xml/arm-core.xml gdb-xml/arm-vfp.xml gdb-xml/arm-vfp3.xml gdb-xml/arm-vfp-sysregs.xml gdb-xml/arm-neon.xml gdb-xml/arm-m-profile.xml gdb-xml/arm-m-profile-mve.xml TARGET_NEED_FDT=y diff --git a/configs/targets/armeb-linux-user.mak b/configs/targets/armeb-linux-user.mak index 255e44e8b0..f81e5bf1fe 100644 --- a/configs/targets/armeb-linux-user.mak +++ b/configs/targets/armeb-linux-user.mak @@ -2,6 +2,6 @@ TARGET_ARCH=arm TARGET_SYSTBL_ABI=common,oabi TARGET_SYSTBL=syscall.tbl TARGET_WORDS_BIGENDIAN=y -TARGET_XML_FILES= gdb-xml/arm-core.xml gdb-xml/arm-vfp.xml gdb-xml/arm-vfp3.xml gdb-xml/arm-neon.xml gdb-xml/arm-m-profile.xml +TARGET_XML_FILES= gdb-xml/arm-core.xml gdb-xml/arm-vfp.xml gdb-xml/arm-vfp3.xml gdb-xml/arm-vfp-sysregs.xml gdb-xml/arm-neon.xml gdb-xml/arm-m-profile.xml gdb-xml/arm-m-profile-mve.xml TARGET_HAS_BFLT=y CONFIG_ARM_COMPATIBLE_SEMIHOSTING=y diff --git a/configs/targets/i386-softmmu.mak b/configs/targets/i386-softmmu.mak index 5babf71895..6b3c99fc86 100644 --- a/configs/targets/i386-softmmu.mak +++ b/configs/targets/i386-softmmu.mak @@ -1,3 +1,4 @@ TARGET_ARCH=i386 TARGET_SUPPORTS_MTTCG=y +TARGET_NEED_FDT=y TARGET_XML_FILES= gdb-xml/i386-32bit.xml diff --git a/configs/targets/x86_64-softmmu.mak b/configs/targets/x86_64-softmmu.mak index 75e42bc840..197817c943 100644 --- a/configs/targets/x86_64-softmmu.mak +++ b/configs/targets/x86_64-softmmu.mak @@ -1,4 +1,5 @@ TARGET_ARCH=x86_64 TARGET_BASE_ARCH=i386 TARGET_SUPPORTS_MTTCG=y +TARGET_NEED_FDT=y TARGET_XML_FILES= gdb-xml/i386-64bit.xml diff --git a/configure b/configure index f15d85384e..7f5a34b350 100755 --- a/configure +++ b/configure @@ -142,11 +142,11 @@ lines: ${BASH_LINENO[*]}" } do_cc() { - do_compiler "$cc" "$@" + do_compiler "$cc" $CPU_CFLAGS "$@" } do_cxx() { - do_compiler "$cxx" "$@" + do_compiler 
"$cxx" $CPU_CFLAGS "$@" } # Append $2 to the variable named $1, with space separation @@ -158,7 +158,7 @@ update_cxxflags() { # Set QEMU_CXXFLAGS from QEMU_CFLAGS by filtering out those # options which some versions of GCC's C++ compiler complain about # because they only make sense for C programs. - QEMU_CXXFLAGS="$QEMU_CXXFLAGS -D__STDC_LIMIT_MACROS -D__STDC_CONSTANT_MACROS -D__STDC_FORMAT_MACROS" + QEMU_CXXFLAGS="-D__STDC_LIMIT_MACROS -D__STDC_CONSTANT_MACROS -D__STDC_FORMAT_MACROS" CONFIGURE_CXXFLAGS=$(echo "$CONFIGURE_CFLAGS" | sed s/-std=gnu11/-std=gnu++11/) for arg in $QEMU_CFLAGS; do case $arg in @@ -174,14 +174,14 @@ update_cxxflags() { compile_object() { local_cflags="$1" - do_cc $CFLAGS $CONFIGURE_CFLAGS $QEMU_CFLAGS $local_cflags -c -o $TMPO $TMPC + do_cc $CFLAGS $EXTRA_CFLAGS $CONFIGURE_CFLAGS $QEMU_CFLAGS $local_cflags -c -o $TMPO $TMPC } compile_prog() { local_cflags="$1" local_ldflags="$2" - do_cc $CFLAGS $CONFIGURE_CFLAGS $QEMU_CFLAGS $local_cflags -o $TMPE $TMPC \ - $LDFLAGS $CONFIGURE_LDFLAGS $QEMU_LDFLAGS $local_ldflags + do_cc $CFLAGS $EXTRA_CFLAGS $CONFIGURE_CFLAGS $QEMU_CFLAGS $local_cflags -o $TMPE $TMPC \ + $LDFLAGS $EXTRA_LDFLAGS $CONFIGURE_LDFLAGS $QEMU_LDFLAGS $local_ldflags } # symbolically link $1 to $2. Portable version of "ln -sf". @@ -216,10 +216,6 @@ version_ge () { done } -have_backend () { - echo "$trace_backends" | grep "$1" >/dev/null -} - glob() { eval test -z '"${1#'"$2"'}"' } @@ -240,12 +236,11 @@ interp_prefix="/usr/gnemul/qemu-%M" static="no" cross_compile="no" cross_prefix="" -audio_drv_list="" +audio_drv_list="default" block_drv_rw_whitelist="" block_drv_ro_whitelist="" block_drv_whitelist_tools="no" host_cc="cc" -audio_win_int="" libs_qga="" debug_info="yes" lto="false" @@ -291,36 +286,12 @@ for opt do esac done -brlapi="auto" -curl="auto" -iconv="auto" -curses="auto" -docs="auto" -fdt="auto" -netmap="no" -sdl="auto" -sdl_image="auto" -coreaudio="auto" -virtiofsd="auto" -virtfs="auto" -libudev="auto" -mpath="auto" -vnc="auto" -sparse="auto" -vde="$default_feature" -vnc_sasl="auto" -vnc_jpeg="auto" -vnc_png="auto" -xkbcommon="auto" -xen=${default_feature:+disabled} +EXTRA_CFLAGS="" +EXTRA_CXXFLAGS="" +EXTRA_LDFLAGS="" + xen_ctrl_version="$default_feature" -xen_pci_passthrough="auto" -linux_aio="$default_feature" -linux_io_uring="auto" -cap_ng="auto" -attr="auto" xfs="$default_feature" -tcg="enabled" membarrier="$default_feature" vhost_kernel="$default_feature" vhost_net="$default_feature" @@ -328,15 +299,8 @@ vhost_crypto="$default_feature" vhost_scsi="$default_feature" vhost_vsock="$default_feature" vhost_user="no" -vhost_user_blk_server="auto" vhost_user_fs="$default_feature" vhost_vdpa="$default_feature" -bpf="auto" -kvm="auto" -hax="auto" -hvf="auto" -whpx="auto" -nvmm="auto" rdma="$default_feature" pvrdma="$default_feature" gprof="no" @@ -346,79 +310,43 @@ sanitizers="no" tsan="no" fortify_source="$default_feature" strip_opt="yes" -tcg_interpreter="false" -bigendian="no" mingw32="no" gcov="no" EXESUF="" -HOST_DSOSUF=".so" modules="no" module_upgrades="no" prefix="/usr/local" qemu_suffix="qemu" -slirp="auto" -oss_lib="" bsd="no" linux="no" solaris="no" profiler="no" -cocoa="auto" softmmu="yes" linux_user="no" bsd_user="no" -blobs="true" pkgversion="" pie="" qom_cast_debug="yes" trace_backends="log" trace_file="trace" -spice="$default_feature" -spice_protocol="auto" -rbd="auto" -smartcard="auto" -u2f="auto" -libusb="auto" -usb_redir="auto" opengl="$default_feature" cpuid_h="no" avx2_opt="$default_feature" -capstone="auto" -lzo="auto" 
-snappy="auto" -bzip2="auto" -lzfse="auto" -zstd="auto" guest_agent="$default_feature" guest_agent_with_vss="no" guest_agent_ntddscsi="no" -guest_agent_msi="auto" vss_win32_sdk="$default_feature" win_sdk="no" want_tools="$default_feature" -libiscsi="auto" -libnfs="auto" coroutine="" coroutine_pool="$default_feature" debug_stack_usage="no" crypto_afalg="no" -cfi="false" -cfi_debug="false" -seccomp="auto" -glusterfs="auto" -gtk="auto" tls_priority="NORMAL" -gnutls="auto" -nettle="auto" -gcrypt="auto" -auth_pam="auto" -vte="auto" -virglrenderer="auto" tpm="$default_feature" libssh="$default_feature" live_block_migration=${default_feature:-yes} numa="$default_feature" -tcmalloc="no" -jemalloc="no" replication=${default_feature:-yes} bochs=${default_feature:-yes} cloop=${default_feature:-yes} @@ -428,26 +356,34 @@ vdi=${default_feature:-yes} vvfat=${default_feature:-yes} qed=${default_feature:-yes} parallels=${default_feature:-yes} -libxml2="auto" debug_mutex="no" -libpmem="auto" -default_devices="true" plugins="$default_feature" -fuzzing="no" rng_none="no" secret_keyring="$default_feature" -libdaxctl="auto" meson="" +meson_args="" ninja="" +gio="$default_feature" skip_meson=no -gettext="auto" -fuse="auto" -fuse_lseek="auto" -multiprocess="auto" slirp_smbd="$default_feature" -malloc_trim="auto" -gio="$default_feature" +# The following Meson options are handled manually (still they +# are included in the automatically generated help message) + +# 1. Track which submodules are needed +capstone="auto" +fdt="auto" +slirp="auto" + +# 2. Support --with/--without option +default_devices="true" + +# 3. Automatically enable/disable other options +tcg="enabled" +cfi="false" + +# 4. Detection partly done in configure +xen=${default_feature:+disabled} # parse CC options second for opt do @@ -462,13 +398,13 @@ for opt do ;; --cpu=*) cpu="$optarg" ;; - --extra-cflags=*) QEMU_CFLAGS="$QEMU_CFLAGS $optarg" - QEMU_LDFLAGS="$QEMU_LDFLAGS $optarg" + --extra-cflags=*) + EXTRA_CFLAGS="$EXTRA_CFLAGS $optarg" + EXTRA_CXXFLAGS="$EXTRA_CXXFLAGS $optarg" + ;; + --extra-cxxflags=*) EXTRA_CXXFLAGS="$EXTRA_CXXFLAGS $optarg" ;; - --extra-cxxflags=*) QEMU_CXXFLAGS="$QEMU_CXXFLAGS $optarg" - ;; - --extra-ldflags=*) QEMU_LDFLAGS="$QEMU_LDFLAGS $optarg" - EXTRA_LDFLAGS="$optarg" + --extra-ldflags=*) EXTRA_LDFLAGS="$EXTRA_LDFLAGS $optarg" ;; --enable-debug-info) debug_info="yes" ;; @@ -529,11 +465,13 @@ sdl2_config="${SDL2_CONFIG-${cross_prefix}sdl2-config}" # left shift of signed integers is well defined and has the expected # 2s-complement style results. (Both clang and gcc agree that it # provides these semantics.) 
-QEMU_CFLAGS="-fno-strict-aliasing -fno-common -fwrapv $QEMU_CFLAGS" +QEMU_CFLAGS="-fno-strict-aliasing -fno-common -fwrapv" QEMU_CFLAGS="-Wundef -Wwrite-strings -Wmissing-prototypes $QEMU_CFLAGS" QEMU_CFLAGS="-Wstrict-prototypes -Wredundant-decls $QEMU_CFLAGS" QEMU_CFLAGS="-D_GNU_SOURCE -D_FILE_OFFSET_BITS=64 -D_LARGEFILE_SOURCE $QEMU_CFLAGS" +QEMU_LDFLAGS= + # Flags that are needed during configure but later taken care of by Meson CONFIGURE_CFLAGS="-std=gnu11 -Wall" CONFIGURE_LDFLAGS= @@ -563,15 +501,6 @@ int main(void) { return 0; } EOF } -write_c_fuzzer_skeleton() { - cat > $TMPC < -#include -int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size); -int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size) { return 0; } -EOF -} - if check_define __linux__ ; then targetos="Linux" elif check_define _WIN32 ; then @@ -604,9 +533,6 @@ fi # cross-compiling to one of these OSes then you'll need to specify # the correct CPU with the --cpu option. case $targetos in -Darwin) - HOST_DSOSUF=".dylib" - ;; SunOS) # $(uname -m) returns i86pc even on an x86_64 box, so default based on isainfo if test -z "$cpu" && test "$(isainfo -k)" = "amd64"; then @@ -650,11 +576,7 @@ elif check_define __s390__ ; then cpu="s390" fi elif check_define __riscv ; then - if check_define _LP64 ; then - cpu="riscv64" - else - cpu="riscv32" - fi + cpu="riscv" elif check_define __arm__ ; then cpu="arm" elif check_define __aarch64__ ; then @@ -667,7 +589,7 @@ ARCH= # Normalise host CPU name and set ARCH. # Note that this case should only have supported host CPUs, not guests. case "$cpu" in - ppc|ppc64|s390x|sparc64|x32|riscv32|riscv64) + ppc|ppc64|s390x|sparc64|x32|riscv) ;; ppc64le) ARCH="ppc64" @@ -704,54 +626,34 @@ fi case $targetos in MINGW32*) mingw32="yes" - audio_possible_drivers="dsound sdl" - if check_include dsound.h; then - audio_drv_list="dsound" - else - audio_drv_list="" - fi supported_os="yes" plugins="no" pie="no" ;; GNU/kFreeBSD) bsd="yes" - audio_drv_list="oss try-sdl" - audio_possible_drivers="oss sdl pa" ;; FreeBSD) bsd="yes" bsd_user="yes" make="${MAKE-gmake}" - audio_drv_list="oss try-sdl" - audio_possible_drivers="oss sdl pa" # needed for kinfo_getvmmap(3) in libutil.h - netmap="" # enable netmap autodetect ;; DragonFly) bsd="yes" make="${MAKE-gmake}" - audio_drv_list="oss try-sdl" - audio_possible_drivers="oss sdl pa" ;; NetBSD) bsd="yes" make="${MAKE-gmake}" - audio_drv_list="oss try-sdl" - audio_possible_drivers="oss sdl" - oss_lib="-lossaudio" ;; OpenBSD) bsd="yes" make="${MAKE-gmake}" - audio_drv_list="try-sdl" - audio_possible_drivers="sdl" ;; Darwin) bsd="yes" darwin="yes" - audio_drv_list="try-coreaudio try-sdl" - audio_possible_drivers="coreaudio sdl" # Disable attempts to use ObjectiveC features in os/object.h since they # won't work when we're compiling with gcc as a C compiler. 
QEMU_CFLAGS="-DOS_OBJECT_USE_OBJC=0 $QEMU_CFLAGS" @@ -760,10 +662,6 @@ SunOS) solaris="yes" make="${MAKE-gmake}" smbd="${SMBD-/usr/sfw/sbin/smbd}" - if test -f /usr/include/sys/soundcard.h ; then - audio_drv_list="oss try-sdl" - fi - audio_possible_drivers="oss sdl" # needed for CMSG_ macros in sys/socket.h QEMU_CFLAGS="-D_XOPEN_SOURCE=600 $QEMU_CFLAGS" # needed for TIOCWIN* defines in termios.h @@ -775,8 +673,6 @@ Haiku) QEMU_CFLAGS="-DB_USE_POSITIVE_POSIX_ERRORS -D_BSD_SOURCE -fPIC $QEMU_CFLAGS" ;; Linux) - audio_drv_list="try-pa oss" - audio_possible_drivers="oss alsa sdl pa" linux="yes" linux_user="yes" vhost_user=${default_feature:-yes} @@ -820,7 +716,6 @@ fi if test "$mingw32" = "yes" ; then EXESUF=".exe" - HOST_DSOSUF=".dll" # MinGW needs -mthreads for TLS and macro _MT. CONFIGURE_CFLAGS="-mthreads $CONFIGURE_CFLAGS" write_c_skeleton; @@ -833,6 +728,18 @@ werror="" as_shared_lib="no" as_static_lib="no" +. $source_path/scripts/meson-buildoptions.sh + +meson_options= +meson_option_parse() { + meson_options="$meson_options $(_meson_option_parse "$@")" + if test $? -eq 1; then + echo "ERROR: unknown option $1" + echo "Try '$0 --help' for more information" + exit 1 + fi +} + for opt do optarg=$(expr "x$opt" : 'x[^=]*=\(.*\)') case "$opt" in @@ -906,11 +813,6 @@ for opt do error_exit "Can't mix --target-list-exclude with --target-list" fi ;; - --enable-trace-backends=*) trace_backends="$optarg" - ;; - # XXX: backwards compatibility - --enable-trace-backend=*) trace_backends="$optarg" - ;; --with-trace-file=*) trace_file="$optarg" ;; --with-default-devices) default_devices="true" @@ -973,44 +875,10 @@ for opt do # configure to be used by RPM and similar macros that set # lots of directory switches by default. ;; - --disable-sdl) sdl="disabled" - ;; - --enable-sdl) sdl="enabled" - ;; - --disable-sdl-image) sdl_image="disabled" - ;; - --enable-sdl-image) sdl_image="enabled" - ;; --disable-qom-cast-debug) qom_cast_debug="no" ;; --enable-qom-cast-debug) qom_cast_debug="yes" ;; - --disable-virtfs) virtfs="disabled" - ;; - --enable-virtfs) virtfs="enabled" - ;; - --disable-libudev) libudev="disabled" - ;; - --enable-libudev) libudev="enabled" - ;; - --disable-virtiofsd) virtiofsd="disabled" - ;; - --enable-virtiofsd) virtiofsd="enabled" - ;; - --disable-mpath) mpath="disabled" - ;; - --enable-mpath) mpath="enabled" - ;; - --disable-vnc) vnc="disabled" - ;; - --enable-vnc) vnc="enabled" - ;; - --disable-gettext) gettext="disabled" - ;; - --enable-gettext) gettext="enabled" - ;; - --oss-lib=*) oss_lib="$optarg" - ;; --audio-drv-list=*) audio_drv_list="$optarg" ;; --block-drv-rw-whitelist=*|--block-drv-whitelist=*) block_drv_rw_whitelist=$(echo "$optarg" | sed -e 's/,/ /g') @@ -1041,115 +909,27 @@ for opt do ;; --disable-tsan) tsan="no" ;; - --enable-sparse) sparse="enabled" - ;; - --disable-sparse) sparse="disabled" - ;; --disable-strip) strip_opt="no" ;; - --disable-vnc-sasl) vnc_sasl="disabled" - ;; - --enable-vnc-sasl) vnc_sasl="enabled" - ;; - --disable-vnc-jpeg) vnc_jpeg="disabled" - ;; - --enable-vnc-jpeg) vnc_jpeg="enabled" - ;; - --disable-vnc-png) vnc_png="disabled" - ;; - --enable-vnc-png) vnc_png="enabled" - ;; --disable-slirp) slirp="disabled" ;; --enable-slirp) slirp="enabled" ;; --enable-slirp=git) slirp="internal" ;; - --enable-slirp=system) slirp="system" - ;; - --disable-vde) vde="no" - ;; - --enable-vde) vde="yes" - ;; - --disable-netmap) netmap="no" - ;; - --enable-netmap) netmap="yes" + --enable-slirp=*) slirp="$optarg" ;; --disable-xen) xen="disabled" ;; --enable-xen) 
xen="enabled" ;; - --disable-xen-pci-passthrough) xen_pci_passthrough="disabled" - ;; - --enable-xen-pci-passthrough) xen_pci_passthrough="enabled" - ;; - --disable-brlapi) brlapi="disabled" - ;; - --enable-brlapi) brlapi="enabled" - ;; - --disable-kvm) kvm="disabled" - ;; - --enable-kvm) kvm="enabled" - ;; - --disable-hax) hax="disabled" - ;; - --enable-hax) hax="enabled" - ;; - --disable-hvf) hvf="disabled" - ;; - --enable-hvf) hvf="enabled" - ;; - --disable-nvmm) nvmm="disabled" - ;; - --enable-nvmm) nvmm="enabled" - ;; - --disable-whpx) whpx="disabled" - ;; - --enable-whpx) whpx="enabled" - ;; - --disable-tcg-interpreter) tcg_interpreter="false" - ;; - --enable-tcg-interpreter) tcg_interpreter="true" - ;; - --disable-cap-ng) cap_ng="disabled" - ;; - --enable-cap-ng) cap_ng="enabled" - ;; --disable-tcg) tcg="disabled" plugins="no" ;; --enable-tcg) tcg="enabled" ;; - --disable-malloc-trim) malloc_trim="disabled" - ;; - --enable-malloc-trim) malloc_trim="enabled" - ;; - --disable-spice) spice="no" - ;; - --enable-spice) - spice_protocol="yes" - spice="yes" - ;; - --disable-spice-protocol) - spice_protocol="no" - spice="no" - ;; - --enable-spice-protocol) spice_protocol="yes" - ;; - --disable-libiscsi) libiscsi="disabled" - ;; - --enable-libiscsi) libiscsi="enabled" - ;; - --disable-libnfs) libnfs="disabled" - ;; - --enable-libnfs) libnfs="enabled" - ;; --enable-profiler) profiler="yes" ;; - --disable-cocoa) cocoa="disabled" - ;; - --enable-cocoa) cocoa="enabled" - ;; --disable-system) softmmu="no" ;; --enable-system) softmmu="yes" @@ -1193,52 +973,18 @@ for opt do ;; --disable-cfi) cfi="false" ;; - --enable-cfi-debug) cfi_debug="true" - ;; - --disable-cfi-debug) cfi_debug="false" - ;; - --disable-curses) curses="disabled" - ;; - --enable-curses) curses="enabled" - ;; - --disable-iconv) iconv="disabled" - ;; - --enable-iconv) iconv="enabled" - ;; - --disable-curl) curl="disabled" - ;; - --enable-curl) curl="enabled" - ;; --disable-fdt) fdt="disabled" ;; --enable-fdt) fdt="enabled" ;; --enable-fdt=git) fdt="internal" ;; - --enable-fdt=system) fdt="system" - ;; - --disable-linux-aio) linux_aio="no" - ;; - --enable-linux-aio) linux_aio="yes" - ;; - --disable-linux-io-uring) linux_io_uring="disabled" - ;; - --enable-linux-io-uring) linux_io_uring="enabled" - ;; - --disable-attr) attr="disabled" - ;; - --enable-attr) attr="enabled" + --enable-fdt=*) fdt="$optarg" ;; --disable-membarrier) membarrier="no" ;; --enable-membarrier) membarrier="yes" ;; - --disable-bpf) bpf="disabled" - ;; - --enable-bpf) bpf="enabled" - ;; - --disable-blobs) blobs="false" - ;; --with-pkgversion=*) pkgversion="$optarg" ;; --with-coroutine=*) coroutine="$optarg" @@ -1253,10 +999,6 @@ for opt do ;; --disable-crypto-afalg) crypto_afalg="no" ;; - --disable-docs) docs="disabled" - ;; - --enable-docs) docs="enabled" - ;; --disable-vhost-net) vhost_net="no" ;; --enable-vhost-net) vhost_net="yes" @@ -1273,10 +1015,6 @@ for opt do ;; --enable-vhost-vsock) vhost_vsock="yes" ;; - --disable-vhost-user-blk-server) vhost_user_blk_server="disabled" - ;; - --enable-vhost-user-blk-server) vhost_user_blk_server="enabled" - ;; --disable-vhost-user-fs) vhost_user_fs="no" ;; --enable-vhost-user-fs) vhost_user_fs="yes" @@ -1285,60 +1023,16 @@ for opt do ;; --enable-opengl) opengl="yes" ;; - --disable-rbd) rbd="disabled" - ;; - --enable-rbd) rbd="enabled" - ;; --disable-xfsctl) xfs="no" ;; --enable-xfsctl) xfs="yes" ;; - --disable-smartcard) smartcard="disabled" - ;; - --enable-smartcard) smartcard="enabled" - ;; - --disable-u2f) 
u2f="disabled" - ;; - --enable-u2f) u2f="enabled" - ;; - --disable-libusb) libusb="disabled" - ;; - --enable-libusb) libusb="enabled" - ;; - --disable-usb-redir) usb_redir="disabled" - ;; - --enable-usb-redir) usb_redir="enabled" - ;; --disable-zlib-test) ;; - --disable-lzo) lzo="disabled" - ;; - --enable-lzo) lzo="enabled" - ;; - --disable-snappy) snappy="disabled" - ;; - --enable-snappy) snappy="enabled" - ;; - --disable-bzip2) bzip2="disabled" - ;; - --enable-bzip2) bzip2="enabled" - ;; - --enable-lzfse) lzfse="enabled" - ;; - --disable-lzfse) lzfse="disabled" - ;; - --disable-zstd) zstd="disabled" - ;; - --enable-zstd) zstd="enabled" - ;; --enable-guest-agent) guest_agent="yes" ;; --disable-guest-agent) guest_agent="no" ;; - --enable-guest-agent-msi) guest_agent_msi="enabled" - ;; - --disable-guest-agent-msi) guest_agent_msi="disabled" - ;; --with-vss-sdk) vss_win32_sdk="" ;; --with-vss-sdk=*) vss_win32_sdk="$optarg" @@ -1355,12 +1049,6 @@ for opt do ;; --disable-tools) want_tools="no" ;; - --enable-seccomp) seccomp="enabled" - ;; - --disable-seccomp) seccomp="disabled" - ;; - --disable-glusterfs) glusterfs="disabled" - ;; --disable-avx2) avx2_opt="no" ;; --enable-avx2) avx2_opt="yes" @@ -1369,9 +1057,6 @@ for opt do ;; --enable-avx512f) avx512f_opt="yes" ;; - - --enable-glusterfs) glusterfs="enabled" - ;; --disable-virtio-blk-data-plane|--enable-virtio-blk-data-plane) echo "$0: $opt is obsolete, virtio-blk data-plane is always on" >&2 ;; @@ -1381,28 +1066,8 @@ for opt do --enable-uuid|--disable-uuid) echo "$0: $opt is obsolete, UUID support is always built" >&2 ;; - --disable-gtk) gtk="disabled" - ;; - --enable-gtk) gtk="enabled" - ;; --tls-priority=*) tls_priority="$optarg" ;; - --disable-gnutls) gnutls="disabled" - ;; - --enable-gnutls) gnutls="enabled" - ;; - --disable-nettle) nettle="disabled" - ;; - --enable-nettle) nettle="enabled" - ;; - --disable-gcrypt) gcrypt="disabled" - ;; - --enable-gcrypt) gcrypt="enabled" - ;; - --disable-auth-pam) auth_pam="disabled" - ;; - --enable-auth-pam) auth_pam="enabled" - ;; --enable-rdma) rdma="yes" ;; --disable-rdma) rdma="no" @@ -1411,14 +1076,6 @@ for opt do ;; --disable-pvrdma) pvrdma="no" ;; - --disable-vte) vte="disabled" - ;; - --enable-vte) vte="enabled" - ;; - --disable-virglrenderer) virglrenderer="disabled" - ;; - --enable-virglrenderer) virglrenderer="enabled" - ;; --disable-tpm) tpm="no" ;; --enable-tpm) tpm="yes" @@ -1435,18 +1092,6 @@ for opt do ;; --enable-numa) numa="yes" ;; - --disable-libxml2) libxml2="disabled" - ;; - --enable-libxml2) libxml2="enabled" - ;; - --disable-tcmalloc) tcmalloc="no" - ;; - --enable-tcmalloc) tcmalloc="yes" - ;; - --disable-jemalloc) jemalloc="no" - ;; - --enable-jemalloc) jemalloc="yes" - ;; --disable-replication) replication="no" ;; --enable-replication) replication="yes" @@ -1501,18 +1146,10 @@ for opt do ;; --enable-capstone=git) capstone="internal" ;; - --enable-capstone=system) capstone="system" + --enable-capstone=*) capstone="$optarg" ;; --with-git=*) git="$optarg" ;; - --enable-git-update) - git_submodules_action="update" - echo "--enable-git-update deprecated, use --with-git-submodules=update" - ;; - --disable-git-update) - git_submodules_action="validate" - echo "--disable-git-update deprecated, use --with-git-submodules=validate" - ;; --with-git-submodules=*) git_submodules_action="$optarg" ;; @@ -1520,14 +1157,6 @@ for opt do ;; --disable-debug-mutex) debug_mutex=no ;; - --enable-libpmem) libpmem="enabled" - ;; - --disable-libpmem) libpmem="disabled" - ;; - --enable-xkbcommon) 
xkbcommon="enabled" - ;; - --disable-xkbcommon) xkbcommon="disabled" - ;; --enable-plugins) if test "$mingw32" = "yes"; then error_exit "TCG plugins not currently supported on Windows platforms" else @@ -1540,10 +1169,6 @@ for opt do ;; --disable-containers) use_containers="no" ;; - --enable-fuzzing) fuzzing=yes - ;; - --disable-fuzzing) fuzzing=no - ;; --gdb=*) gdb_bin="$optarg" ;; --enable-rng-none) rng_none=yes @@ -1554,22 +1179,6 @@ for opt do ;; --disable-keyring) secret_keyring="no" ;; - --enable-libdaxctl) libdaxctl="enabled" - ;; - --disable-libdaxctl) libdaxctl="disabled" - ;; - --enable-fuse) fuse="enabled" - ;; - --disable-fuse) fuse="disabled" - ;; - --enable-fuse-lseek) fuse_lseek="enabled" - ;; - --disable-fuse-lseek) fuse_lseek="disabled" - ;; - --enable-multiprocess) multiprocess="enabled" - ;; - --disable-multiprocess) multiprocess="disabled" - ;; --enable-gio) gio=yes ;; --disable-gio) gio=no @@ -1578,6 +1187,18 @@ for opt do ;; --disable-slirp-smbd) slirp_smbd=no ;; + # backwards compatibility options + --enable-trace-backend=*) meson_option_parse "--enable-trace-backends=$optarg" "$optarg" + ;; + --disable-blobs) meson_option_parse --disable-install-blobs "" + ;; + --enable-tcmalloc) meson_option_parse --enable-malloc=tcmalloc tcmalloc + ;; + --enable-jemalloc) meson_option_parse --enable-malloc=jemalloc jemalloc + ;; + # everything else has the same name in configure and meson + --enable-* | --disable-*) meson_option_parse "$opt" "$optarg" + ;; --as-shared-lib) as_shared_lib="yes" QEMU_CFLAGS="$QEMU_CFLAGS -fPIC -DAS_LIB=1" @@ -1656,51 +1277,27 @@ firmwarepath="${firmwarepath:-$datadir/qemu-firmware}" localedir="${localedir:-$datadir/locale}" case "$cpu" in - ppc) - CPU_CFLAGS="-m32" - QEMU_LDFLAGS="-m32 $QEMU_LDFLAGS" - ;; - ppc64) - CPU_CFLAGS="-m64" - QEMU_LDFLAGS="-m64 $QEMU_LDFLAGS" - ;; - sparc) - CPU_CFLAGS="-m32 -mv8plus -mcpu=ultrasparc" - QEMU_LDFLAGS="-m32 -mv8plus $QEMU_LDFLAGS" - ;; - sparc64) - CPU_CFLAGS="-m64 -mcpu=ultrasparc" - QEMU_LDFLAGS="-m64 $QEMU_LDFLAGS" - ;; - s390) - CPU_CFLAGS="-m31" - QEMU_LDFLAGS="-m31 $QEMU_LDFLAGS" - ;; - s390x) - CPU_CFLAGS="-m64" - QEMU_LDFLAGS="-m64 $QEMU_LDFLAGS" - ;; - i386) - CPU_CFLAGS="-m32" - QEMU_LDFLAGS="-m32 $QEMU_LDFLAGS" - ;; - x86_64) - # ??? Only extremely old AMD cpus do not have cmpxchg16b. - # If we truly care, we should simply detect this case at - # runtime and generate the fallback to serial emulation. - CPU_CFLAGS="-m64 -mcx16" - QEMU_LDFLAGS="-m64 $QEMU_LDFLAGS" - ;; - x32) - CPU_CFLAGS="-mx32" - QEMU_LDFLAGS="-mx32 $QEMU_LDFLAGS" - ;; + ppc) CPU_CFLAGS="-m32" ;; + ppc64) CPU_CFLAGS="-m64" ;; + sparc) CPU_CFLAGS="-m32 -mv8plus -mcpu=ultrasparc" ;; + sparc64) CPU_CFLAGS="-m64 -mcpu=ultrasparc" ;; + s390) CPU_CFLAGS="-m31" ;; + s390x) CPU_CFLAGS="-m64" ;; + i386) CPU_CFLAGS="-m32" ;; + x32) CPU_CFLAGS="-mx32" ;; + + # ??? Only extremely old AMD cpus do not have cmpxchg16b. + # If we truly care, we should simply detect this case at + # runtime and generate the fallback to serial emulation. + x86_64) CPU_CFLAGS="-m64 -mcx16" ;; + # No special flags required for other host CPUs esac -eval "cross_cc_${cpu}=\$cc" -cross_cc_vars="$cross_cc_vars cross_cc_${cpu}" -QEMU_CFLAGS="$CPU_CFLAGS $QEMU_CFLAGS" +if eval test -z "\${cross_cc_$cpu}"; then + eval "cross_cc_${cpu}=\$cc" + cross_cc_vars="$cross_cc_vars cross_cc_${cpu}" +fi # For user-mode emulation the host arch has to be one we explicitly # support, even if we're using TCI. 
@@ -1741,9 +1338,6 @@ for config in $mak_wilds; do fi done -# Enumerate public trace backends for --help output -trace_backend_list=$(echo $(grep -le '^PUBLIC = True$' "$source_path"/scripts/tracetool/backend/*.py | sed -e 's/^.*\/\(.*\)\.py$/\1/')) - if test x"$show_help" = x"yes" ; then cat << EOF @@ -1770,8 +1364,8 @@ Advanced options (experts only): build time --cxx=CXX use C++ compiler CXX [$cxx] --objcc=OBJCC use Objective-C compiler OBJCC [$objcc] - --extra-cflags=CFLAGS append extra C compiler flags QEMU_CFLAGS - --extra-cxxflags=CXXFLAGS append extra C++ compiler flags QEMU_CXXFLAGS + --extra-cflags=CFLAGS append extra C compiler flags CFLAGS + --extra-cxxflags=CXXFLAGS append extra C++ compiler flags CXXFLAGS --extra-ldflags=LDFLAGS append extra linker flags LDFLAGS --cross-cc-ARCH=CC use compiler when building ARCH guest test cases --cross-cc-flags-ARCH= use compiler flags when building ARCH guest tests @@ -1810,8 +1404,7 @@ Advanced options (experts only): --disable-strip disable stripping binaries --disable-werror disable compilation abort on warning --disable-stack-protector disable compiler-provided stack protection - --audio-drv-list=LIST set audio drivers list: - Available drivers: $audio_possible_drivers + --audio-drv-list=LIST set audio drivers to try if -audiodev is not used --block-drv-whitelist=L Same as --block-drv-rw-whitelist=L --block-drv-rw-whitelist=L set block driver read-write whitelist @@ -1821,19 +1414,12 @@ Advanced options (experts only): (by default affects only QEMU, not tools like qemu-img) --enable-block-drv-whitelist-in-tools use block whitelist also in tools instead of only QEMU - --enable-trace-backends=B Set trace backend - Available backends: $trace_backend_list --with-trace-file=NAME Full PATH,NAME of file to store traces Default:trace- - --disable-slirp disable SLIRP userspace network connectivity - --enable-tcg-interpreter enable TCI (TCG with bytecode interpreter, experimental and slow) - --enable-malloc-trim enable libc malloc_trim() for memory optimization - --oss-lib path to OSS library --cpu=CPU Build for host CPU [$cpu] --with-coroutine=BACKEND coroutine backend. Supported options: ucontext, sigaltstack, windows --enable-gcov enable test coverage analysis with gcov - --disable-blobs disable installing provided firmware blobs --with-vss-sdk=SDK-path enable Windows VSS support in QEMU Guest Agent --with-win-sdk=SDK-path path to Windows Platform SDK (to build VSS .tlb) --tls-priority default TLS protocol/cipher priority string @@ -1845,113 +1431,41 @@ Advanced options (experts only): enable plugins via shared library loading --disable-containers don't use containers for cross-building --gdb=GDB-path gdb to use for gdbstub tests [$gdb_bin] - -Optional features, enabled with --enable-FEATURE and -disabled with --disable-FEATURE, default is enabled if available -(unless built with --without-default-features): - +EOF + meson_options_help +cat << EOF system all system emulation targets user supported user emulation targets linux-user all linux usermode emulation targets bsd-user all BSD usermode emulation targets - docs build documentation guest-agent build the QEMU Guest Agent - guest-agent-msi build guest agent Windows MSI installation package pie Position Independent Executables modules modules support (non-Windows) module-upgrades try to load modules from alternate paths for upgrades debug-tcg TCG debugging (default is disabled) debug-info debugging information lto Enable Link-Time Optimization. 
- sparse sparse checker safe-stack SafeStack Stack Smash Protection. Depends on clang/llvm >= 3.7 and requires coroutine backend ucontext. - cfi Enable Control-Flow Integrity for indirect function calls. - In case of a cfi violation, QEMU is terminated with SIGILL - Depends on lto and is incompatible with modules - Automatically enables Link-Time Optimization (lto) - cfi-debug In case of a cfi violation, a message containing the line that - triggered the error is written to stderr. After the error, - QEMU is still terminated with SIGILL - gnutls GNUTLS cryptography support - nettle nettle cryptography support - gcrypt libgcrypt cryptography support - auth-pam PAM access control - sdl SDL UI - sdl-image SDL Image support for icons - gtk gtk UI - vte vte support for the gtk UI - curses curses UI - iconv font glyph conversion support - vnc VNC UI support - vnc-sasl SASL encryption for VNC server - vnc-jpeg JPEG lossy compression for VNC server - vnc-png PNG compression for VNC server - cocoa Cocoa UI (Mac OS X only) - virtfs VirtFS - virtiofsd build virtiofs daemon (virtiofsd) - libudev Use libudev to enumerate host devices - mpath Multipath persistent reservation passthrough - xen xen backend driver support - xen-pci-passthrough PCI passthrough support for Xen - brlapi BrlAPI (Braile) - curl curl connectivity membarrier membarrier system call (for Linux 4.14+ or Windows) - fdt fdt device tree - kvm KVM acceleration support - hax HAX acceleration support - hvf Hypervisor.framework acceleration support - nvmm NVMM acceleration support - whpx Windows Hypervisor Platform acceleration support rdma Enable RDMA-based migration pvrdma Enable PVRDMA support - vde support for vde network - netmap support for netmap network - linux-aio Linux AIO support - linux-io-uring Linux io_uring support - cap-ng libcap-ng support - attr attr and xattr support vhost-net vhost-net kernel acceleration support vhost-vsock virtio sockets device support vhost-scsi vhost-scsi kernel target support vhost-crypto vhost-user-crypto backend support vhost-kernel vhost kernel backend support vhost-user vhost-user backend support - vhost-user-blk-server vhost-user-blk server support vhost-vdpa vhost-vdpa kernel backend support - bpf BPF kernel support - spice spice - spice-protocol spice-protocol - rbd rados block device (rbd) - libiscsi iscsi support - libnfs nfs support - smartcard smartcard support (libcacard) - u2f U2F support (u2f-emu) - libusb libusb (for usb passthrough) live-block-migration Block migration in the main migration stream - usb-redir usb network redirection support - lzo support of lzo compression library - snappy support of snappy compression library - bzip2 support of bzip2 compression library - (for reading bzip2-compressed dmg images) - lzfse support of lzfse compression library - (for reading lzfse-compressed dmg images) - zstd support for zstd compression library - (for migration compression and qcow2 cluster compression) - seccomp seccomp support coroutine-pool coroutine freelist (better performance) - glusterfs GlusterFS backend tpm TPM support libssh ssh block device support numa libnuma support - libxml2 for Parallels image format - tcmalloc tcmalloc support - jemalloc jemalloc support avx2 AVX2 optimization support avx512f AVX512F optimization support replication replication support opengl opengl support - virglrenderer virgl rendering support xfsctl xfsctl support qom-cast-debug cast debugging support tools build qemu-io, qemu-nbd and qemu-img tools @@ -1964,15 +1478,8 @@ disabled with 
--disable-FEATURE, default is enabled if available qed qed image format support parallels parallels image format support crypto-afalg Linux AF_ALG crypto backend driver - capstone capstone disassembler support debug-mutex mutex debugging support - libpmem libpmem support - xkbcommon xkbcommon support rng-none dummy RNG, avoid using /dev/(u)random and getrandom() - libdaxctl libdaxctl support - fuse FUSE block device export - fuse-lseek SEEK_HOLE/SEEK_DATA support for FUSE exports - multiprocess Out of process device emulation support gio libgio support slirp-smbd use smbd (at path --smbd=*) in slirp networking @@ -2007,7 +1514,7 @@ python_version=$($python -c 'import sys; print("%d.%d.%d" % (sys.version_info[0] python="$python -B" if test -z "$meson"; then - if test "$explicit_python" = no && has meson && version_ge "$(meson --version)" 0.55.3; then + if test "$explicit_python" = no && has meson && version_ge "$(meson --version)" 0.59.3; then meson=meson elif test $git_submodules_action != 'ignore' ; then meson=git @@ -2106,8 +1613,8 @@ cat > $TMPC << EOF # endif # endif #elif defined(__GNUC__) && defined(__GNUC_MINOR__) -# if __GNUC__ < 7 || (__GNUC__ == 7 && __GNUC_MINOR__ < 5) -# error You need at least GCC v7.5.0 to compile QEMU +# if __GNUC__ < 7 || (__GNUC__ == 7 && __GNUC_MINOR__ < 4) +# error You need at least GCC v7.4.0 to compile QEMU # endif #else # error You either need GCC or Clang to compiler QEMU @@ -2115,7 +1622,7 @@ cat > $TMPC << EOF int main (void) { return 0; } EOF if ! compile_prog "" "" ; then - error_exit "You need at least GCC v7.5 or Clang v6.0 (or XCode Clang v10.0)" + error_exit "You need at least GCC v7.4 or Clang v6.0 (or XCode Clang v10.0)" fi # Accumulate -Wfoo and -Wno-bar separately. @@ -2231,17 +1738,6 @@ if test "$static" = "yes" ; then fi fi -# Unconditional check for compiler __thread support - cat > $TMPC << EOF -static __thread int tls_var; -int main(void) { return tls_var; } -EOF - -if ! compile_prog "-Werror" "" ; then - error_exit "Your compiler does not support the __thread specifier for " \ - "Thread-Local Storage (TLS). Please upgrade to a version that does." -fi - cat > $TMPC << EOF #ifdef __linux__ @@ -2253,9 +1749,11 @@ static THREAD int tls_var; int main(void) { return tls_var; } EOF -# Check we support --no-pie first; we will need this for building ROMs. +# Check we support -fno-pie and -no-pie first; we will need the former for +# building ROMs, and both for everything if --disable-pie is passed. 
if compile_prog "-Werror -fno-pie" "-no-pie"; then CFLAGS_NOPIE="-fno-pie" + LDFLAGS_NOPIE="-no-pie" fi if test "$static" = "yes"; then @@ -2271,6 +1769,7 @@ if test "$static" = "yes"; then fi elif test "$pie" = "no"; then CONFIGURE_CFLAGS="$CFLAGS_NOPIE $CONFIGURE_CFLAGS" + CONFIGURE_LDFLAGS="$LDFLAGS_NOPIE $CONFIGURE_LDFLAGS" elif compile_prog "-Werror -fPIE -DPIE" "-pie"; then CONFIGURE_CFLAGS="-fPIE -DPIE $CONFIGURE_CFLAGS" CONFIGURE_LDFLAGS="-pie $CONFIGURE_LDFLAGS" @@ -2313,21 +1812,6 @@ EOF fi fi -######################################### -# Solaris specific configure tool chain decisions - -if test "$solaris" = "yes" ; then - if has ar; then - : - else - if test -f /usr/ccs/bin/ar ; then - error_exit "No path includes ar" \ - "Add /usr/ccs/bin to your path and rerun configure" - fi - error_exit "No path includes ar" - fi -fi - if test "$tcg" = "enabled"; then git_submodules="$git_submodules tests/fp/berkeley-testfloat-3" git_submodules="$git_submodules tests/fp/berkeley-softfloat-3" @@ -2415,38 +1899,6 @@ if test -z "$want_tools"; then fi fi -########################################## -# Disable features only meaningful for system-mode emulation -if test "$softmmu" = "no"; then - audio_drv_list="" -fi - -########################################## -# L2TPV3 probe - -cat > $TMPC < -#include -int main(void) { return sizeof(struct mmsghdr); } -EOF -if compile_prog "" "" ; then - l2tpv3=yes -else - l2tpv3=no -fi - -cat > $TMPC < -int main(int argc, char *argv[]) { - return mlockall(MCL_FUTURE); -} -EOF -if compile_prog "" "" ; then - have_mlockall=yes -else - have_mlockall=no -fi - ######################################### # vhost interdependencies and host support @@ -2506,25 +1958,6 @@ if ! has "$pkg_config_exe"; then error_exit "pkg-config binary '$pkg_config_exe' not found" fi -########################################## -# NPTL probe - -if test "$linux_user" = "yes"; then - cat > $TMPC < -#include -int main(void) { -#if !defined(CLONE_SETTLS) || !defined(FUTEX_WAIT) -#error bork -#endif - return 0; -} -EOF - if ! compile_object ; then - feature_not_found "nptl" "Install glibc and linux kernel headers." - fi -fi - ########################################## # xen probe @@ -2947,183 +2380,6 @@ EOF fi fi -########################################## -# vde libraries probe -if test "$vde" != "no" ; then - vde_libs="-lvdeplug" - cat > $TMPC << EOF -#include -int main(void) -{ - struct vde_open_args a = {0, 0, 0}; - char s[] = ""; - vde_open(s, s, &a); - return 0; -} -EOF - if compile_prog "" "$vde_libs" ; then - vde=yes - else - if test "$vde" = "yes" ; then - feature_not_found "vde" "Install vde (Virtual Distributed Ethernet) devel" - fi - vde=no - fi -fi - -########################################## -# netmap support probe -# Apart from looking for netmap headers, we make sure that the host API version -# supports the netmap backend (>=11). The upper bound (15) is meant to simulate -# a minor/major version number. Minor new features will be marked with values up -# to 15, and if something happens that requires a change to the backend we will -# move above 15, submit the backend fixes and modify this two bounds. 
-if test "$netmap" != "no" ; then - cat > $TMPC << EOF -#include -#include -#include -#include -#if (NETMAP_API < 11) || (NETMAP_API > 15) -#error -#endif -int main(void) { return 0; } -EOF - if compile_prog "" "" ; then - netmap=yes - else - if test "$netmap" = "yes" ; then - feature_not_found "netmap" - fi - netmap=no - fi -fi - -########################################## -# detect CoreAudio -if test "$coreaudio" != "no" ; then - coreaudio_libs="-framework CoreAudio" - cat > $TMPC << EOF -#include -int main(void) -{ - return (int)AudioGetCurrentHostTime(); -} -EOF - if compile_prog "" "$coreaudio_libs" ; then - coreaudio=yes - else - coreaudio=no - fi -fi - -########################################## -# Sound support libraries probe - -audio_drv_list=$(echo "$audio_drv_list" | sed -e 's/,/ /g') -for drv in $audio_drv_list; do - case $drv in - alsa | try-alsa) - if $pkg_config alsa --exists; then - alsa_libs=$($pkg_config alsa --libs) - alsa_cflags=$($pkg_config alsa --cflags) - alsa=yes - if test "$drv" = "try-alsa"; then - audio_drv_list=$(echo "$audio_drv_list" | sed -e 's/try-alsa/alsa/') - fi - else - if test "$drv" = "try-alsa"; then - audio_drv_list=$(echo "$audio_drv_list" | sed -e 's/try-alsa//') - else - error_exit "$drv check failed" \ - "Make sure to have the $drv libs and headers installed." - fi - fi - ;; - - pa | try-pa) - if $pkg_config libpulse --exists; then - libpulse=yes - pulse_libs=$($pkg_config libpulse --libs) - pulse_cflags=$($pkg_config libpulse --cflags) - if test "$drv" = "try-pa"; then - audio_drv_list=$(echo "$audio_drv_list" | sed -e 's/try-pa/pa/') - fi - else - if test "$drv" = "try-pa"; then - audio_drv_list=$(echo "$audio_drv_list" | sed -e 's/try-pa//') - else - error_exit "$drv check failed" \ - "Make sure to have the $drv libs and headers installed." - fi - fi - ;; - - sdl) - if test "$sdl" = "no"; then - error_exit "sdl not found or disabled, can not use sdl audio driver" - fi - ;; - - try-sdl) - if test "$sdl" = "no"; then - audio_drv_list=$(echo "$audio_drv_list" | sed -e 's/try-sdl//') - else - audio_drv_list=$(echo "$audio_drv_list" | sed -e 's/try-sdl/sdl/') - fi - ;; - - coreaudio | try-coreaudio) - if test "$coreaudio" = "no"; then - if test "$drv" = "try-coreaudio"; then - audio_drv_list=$(echo "$audio_drv_list" | sed -e 's/try-coreaudio//') - else - error_exit "$drv check failed" \ - "Make sure to have the $drv is available." - fi - else - coreaudio_libs="-framework CoreAudio" - if test "$drv" = "try-coreaudio"; then - audio_drv_list=$(echo "$audio_drv_list" | sed -e 's/try-coreaudio/coreaudio/') - fi - fi - ;; - - dsound) - dsound_libs="-lole32 -ldxguid" - audio_win_int="yes" - ;; - - oss) - oss_libs="$oss_lib" - ;; - - jack | try-jack) - if $pkg_config jack --exists; then - libjack=yes - jack_libs=$($pkg_config jack --libs) - if test "$drv" = "try-jack"; then - audio_drv_list=$(echo "$audio_drv_list" | sed -e 's/try-jack/jack/') - fi - else - if test "$drv" = "try-jack"; then - audio_drv_list=$(echo "$audio_drv_list" | sed -e 's/try-jack//') - else - error_exit "$drv check failed" \ - "Make sure to have the $drv libs and headers installed." 
- fi - fi - ;; - - *) - echo "$audio_possible_drivers" | grep -q "\<$drv\>" || { - error_exit "Unknown driver '$drv' selected" \ - "Possible drivers are: $audio_possible_drivers" - } - ;; - esac -done - ########################################## # plugin linker support probe @@ -3317,71 +2573,6 @@ if test "$modules" = yes; then fi fi -########################################## -# pthread probe -PTHREADLIBS_LIST="-pthread -lpthread -lpthreadGC2" - -pthread=no -cat > $TMPC << EOF -#include -static void *f(void *p) { return NULL; } -int main(void) { - pthread_t thread; - pthread_create(&thread, 0, f, 0); - return 0; -} -EOF -if compile_prog "" "" ; then - pthread=yes -else - for pthread_lib in $PTHREADLIBS_LIST; do - if compile_prog "" "$pthread_lib" ; then - pthread=yes - break - fi - done -fi - -if test "$mingw32" != yes && test "$pthread" = no; then - error_exit "pthread check failed" \ - "Make sure to have the pthread libs and headers installed." -fi - -# check for pthread_setname_np with thread id -pthread_setname_np_w_tid=no -cat > $TMPC << EOF -#include - -static void *f(void *p) { return NULL; } -int main(void) -{ - pthread_t thread; - pthread_create(&thread, 0, f, 0); - pthread_setname_np(thread, "QEMU"); - return 0; -} -EOF -if compile_prog "" "$pthread_lib" ; then - pthread_setname_np_w_tid=yes -fi - -# check for pthread_setname_np without thread id -pthread_setname_np_wo_tid=no -cat > $TMPC << EOF -#include - -static void *f(void *p) { pthread_setname_np("QEMU"); return NULL; } -int main(void) -{ - pthread_t thread; - pthread_create(&thread, 0, f, 0); - return 0; -} -EOF -if compile_prog "" "$pthread_lib" ; then - pthread_setname_np_wo_tid=yes -fi - ########################################## # libssh probe if test "$libssh" != "no" ; then @@ -3397,26 +2588,6 @@ if test "$libssh" != "no" ; then fi fi -########################################## -# linux-aio probe - -if test "$linux_aio" != "no" ; then - cat > $TMPC < -#include -#include -int main(void) { io_setup(0, NULL); io_set_eventfd(NULL, 0); eventfd(0, 0); return 0; } -EOF - if compile_prog "" "-laio" ; then - linux_aio=yes - else - if test "$linux_aio" = "yes" ; then - feature_not_found "linux AIO" "Install libaio devel" - fi - linux_aio=no - fi -fi - ########################################## # TPM emulation is only on POSIX @@ -3432,19 +2603,6 @@ elif test "$tpm" = "yes"; then fi fi -########################################## -# iovec probe -cat > $TMPC < -#include -#include -int main(void) { return sizeof(struct iovec); } -EOF -iovec=no -if compile_prog "" "" ; then - iovec=yes -fi - ########################################## # fdt probe @@ -3504,16 +2662,6 @@ EOF fi fi -malloc=system -if test "$tcmalloc" = "yes" && test "$jemalloc" = "yes" ; then - echo "ERROR: tcmalloc && jemalloc can't be used at the same time" - exit 1 -elif test "$tcmalloc" = "yes" ; then - malloc=tcmalloc -elif test "$jemalloc" = "yes" ; then - malloc=jemalloc -fi - # check for usbfs have_usbfs=no if test "$linux_user" = "yes"; then @@ -3538,41 +2686,6 @@ EOF fi fi -########################################## -# spice probe -if test "$spice_protocol" != "no" ; then - spice_protocol_cflags=$($pkg_config --cflags spice-protocol 2>/dev/null) - if $pkg_config --atleast-version=0.12.3 spice-protocol; then - spice_protocol="yes" - else - if test "$spice_protocol" = "yes" ; then - feature_not_found "spice_protocol" \ - "Install spice-protocol(>=0.12.3) devel" - fi - spice_protocol="no" - fi -fi - -if test "$spice" != "no" ; then - cat > $TMPC << EOF 
-#include -int main(void) { spice_server_new(); return 0; } -EOF - spice_cflags=$($pkg_config --cflags spice-protocol spice-server 2>/dev/null) - spice_libs=$($pkg_config --libs spice-protocol spice-server 2>/dev/null) - if $pkg_config --atleast-version=0.12.5 spice-server && \ - test "$spice_protocol" = "yes" && \ - compile_prog "$spice_cflags" "$spice_libs" ; then - spice="yes" - else - if test "$spice" = "yes" ; then - feature_not_found "spice" \ - "Install spice-server(>=0.12.5) devel" - fi - spice="no" - fi -fi - ########################################## # check if we have VSS SDK headers for win @@ -3661,56 +2774,6 @@ case "$capstone" in ;; esac -########################################## -# check if we have posix_syslog - -posix_syslog=no -cat > $TMPC << EOF -#include -int main(void) { openlog("qemu", LOG_PID, LOG_DAEMON); syslog(LOG_INFO, "configure"); return 0; } -EOF -if compile_prog "" "" ; then - posix_syslog=yes -fi - -########################################## -# check if trace backend exists - -$python "$source_path/scripts/tracetool.py" "--backends=$trace_backends" --check-backends > /dev/null 2> /dev/null -if test "$?" -ne 0 ; then - error_exit "invalid trace backends" \ - "Please choose supported trace backends." -fi - -########################################## -# For 'ust' backend, test if ust headers are present -if have_backend "ust"; then - if $pkg_config lttng-ust --exists; then - lttng_ust_libs=$($pkg_config --libs lttng-ust) - else - error_exit "Trace backend 'ust' missing lttng-ust header files" - fi -fi - -########################################## -# For 'dtrace' backend, test if 'dtrace' command is present -if have_backend "dtrace"; then - if ! has 'dtrace' ; then - error_exit "dtrace command is not found in PATH $PATH" - fi - trace_backend_stap="no" - if has 'stap' ; then - trace_backend_stap="yes" - - # Workaround to avoid dtrace(1) producing a file with 'hidden' symbol - # visibility. Define STAP_SDT_V2 to produce 'default' symbol visibility - # instead. QEMU --enable-modules depends on this because the SystemTap - # semaphores are linked into the main binary and not the module's shared - # object. - QEMU_CFLAGS="$QEMU_CFLAGS -DSTAP_SDT_V2" - fi -fi - ########################################## # check and set a backend for coroutine @@ -3971,42 +3034,6 @@ EOF fi fi -######################################### -# See if 64-bit atomic operations are supported. -# Note that without __atomic builtins, we can only -# assume atomic loads/stores max at pointer size. - -cat > $TMPC << EOF -#include -int main(void) -{ - uint64_t x = 0, y = 0; - y = __atomic_load_n(&x, __ATOMIC_RELAXED); - __atomic_store_n(&x, y, __ATOMIC_RELAXED); - __atomic_compare_exchange_n(&x, &y, x, 0, __ATOMIC_RELAXED, __ATOMIC_RELAXED); - __atomic_exchange_n(&x, y, __ATOMIC_RELAXED); - __atomic_fetch_add(&x, y, __ATOMIC_RELAXED); - return 0; -} -EOF -if compile_prog "" "" ; then - atomic64=yes -fi - -######################################## -# check if getauxval is available. 
- -getauxval=no -cat > $TMPC << EOF -#include -int main(void) { - return getauxval(AT_HWCAP) == 0; -} -EOF -if compile_prog "" "" ; then - getauxval=yes -fi - ######################################## # check if ccache is interfering with # semantic analysis of macros @@ -4049,21 +3076,6 @@ if test "$fortify_source" != "no"; then fi fi -########################################## -# check if struct fsxattr is available via linux/fs.h - -have_fsxattr=no -cat > $TMPC << EOF -#include -struct fsxattr foo; -int main(void) { - return 0; -} -EOF -if compile_prog "" "" ; then - have_fsxattr=yes -fi - ########################################## # check for usable membarrier system call if test "$membarrier" = "yes"; then @@ -4095,33 +3107,6 @@ else membarrier=no fi -########################################## -# check for usable AF_VSOCK environment -have_af_vsock=no -cat > $TMPC << EOF -#include -#include -#include -#if !defined(AF_VSOCK) -# error missing AF_VSOCK flag -#endif -#include -int main(void) { - int sock, ret; - struct sockaddr_vm svm; - socklen_t len = sizeof(svm); - sock = socket(AF_VSOCK, SOCK_STREAM, 0); - ret = getpeername(sock, (struct sockaddr *)&svm, &len); - if ((ret == -1) && (errno == ENOTCONN)) { - return 0; - } - return -1; -} -EOF -if compile_prog "" "" ; then - have_af_vsock=yes -fi - ########################################## # check for usable AF_ALG environment have_afalg=no @@ -4195,26 +3180,6 @@ EOF fi fi -########################################## -# checks for fuzzer -if test "$fuzzing" = "yes" ; then - write_c_fuzzer_skeleton - if test -z "${LIB_FUZZING_ENGINE+xxx}"; then - if compile_prog "$CPU_CFLAGS -Werror -fsanitize=fuzzer" ""; then - have_fuzzer=yes - else - error_exit "Your compiler doesn't support -fsanitize=fuzzer" - exit 1 - fi - fi - - have_clang_coverage_filter=no - echo > $TMPTXT - if compile_prog "$CPU_CFLAGS -Werror -fsanitize=fuzzer -fsanitize-coverage-allowlist=$TMPTXT" ""; then - have_clang_coverage_filter=yes - fi -fi - # Thread sanitizer is, for now, much noisier than the other sanitizers; # keep it separate until that is not the case. 
if test "$tsan" = "yes" && test "$sanitizers" = "yes"; then @@ -4455,7 +3420,7 @@ EOF update_cxxflags - if do_cxx $CXXFLAGS $CONFIGURE_CXXFLAGS $QEMU_CXXFLAGS -o $TMPE $TMPCXX $TMPO $QEMU_LDFLAGS; then + if do_cxx $CXXFLAGS $EXTRA_CXXFLAGS $CONFIGURE_CXXFLAGS $QEMU_CXXFLAGS -o $TMPE $TMPCXX $TMPO $QEMU_LDFLAGS; then # C++ compiler $cxx works ok with C compiler $cc : else @@ -4490,9 +3455,6 @@ fi if test "$strip_opt" = "yes" ; then echo "STRIP=${strip}" >> $config_host_mak fi -if test "$bigendian" = "yes" ; then - echo "HOST_WORDS_BIGENDIAN=y" >> $config_host_mak -fi if test "$mingw32" = "yes" ; then echo "CONFIG_WIN32=y" >> $config_host_mak if test "$guest_agent_with_vss" = "yes" ; then @@ -4541,44 +3503,9 @@ if test "$slirp_smbd" = "yes" ; then echo "CONFIG_SLIRP_SMBD=y" >> $config_host_mak echo "CONFIG_SMBD_COMMAND=\"$smbd\"" >> $config_host_mak fi -if test "$vde" = "yes" ; then - echo "CONFIG_VDE=y" >> $config_host_mak - echo "VDE_LIBS=$vde_libs" >> $config_host_mak -fi -if test "$netmap" = "yes" ; then - echo "CONFIG_NETMAP=y" >> $config_host_mak -fi -if test "$l2tpv3" = "yes" ; then - echo "CONFIG_L2TPV3=y" >> $config_host_mak -fi if test "$gprof" = "yes" ; then echo "CONFIG_GPROF=y" >> $config_host_mak fi -echo "CONFIG_AUDIO_DRIVERS=$audio_drv_list" >> $config_host_mak -for drv in $audio_drv_list; do - def=CONFIG_AUDIO_$(echo $drv | LC_ALL=C tr '[a-z]' '[A-Z]') - echo "$def=y" >> $config_host_mak -done -if test "$alsa" = "yes" ; then - echo "CONFIG_ALSA=y" >> $config_host_mak -fi -echo "ALSA_LIBS=$alsa_libs" >> $config_host_mak -echo "ALSA_CFLAGS=$alsa_cflags" >> $config_host_mak -if test "$libpulse" = "yes" ; then - echo "CONFIG_LIBPULSE=y" >> $config_host_mak -fi -echo "PULSE_LIBS=$pulse_libs" >> $config_host_mak -echo "PULSE_CFLAGS=$pulse_cflags" >> $config_host_mak -echo "COREAUDIO_LIBS=$coreaudio_libs" >> $config_host_mak -echo "DSOUND_LIBS=$dsound_libs" >> $config_host_mak -echo "OSS_LIBS=$oss_libs" >> $config_host_mak -if test "$libjack" = "yes" ; then - echo "CONFIG_LIBJACK=y" >> $config_host_mak -fi -echo "JACK_LIBS=$jack_libs" >> $config_host_mak -if test "$audio_win_int" = "yes" ; then - echo "CONFIG_AUDIO_WIN_INT=y" >> $config_host_mak -fi echo "CONFIG_BDRV_RW_WHITELIST=$block_drv_rw_whitelist" >> $config_host_mak echo "CONFIG_BDRV_RO_WHITELIST=$block_drv_ro_whitelist" >> $config_host_mak if test "$block_drv_whitelist_tools" = "yes" ; then @@ -4613,22 +3540,12 @@ if test "$gdbus_codegen" != "" ; then fi echo "CONFIG_TLS_PRIORITY=\"$tls_priority\"" >> $config_host_mak -# Work around a system header bug with some kernel/XFS header -# versions where they both try to define 'struct fsxattr': -# xfs headers will not try to redefine structs from linux headers -# if this macro is set. 
-if test "$have_fsxattr" = "yes" ; then - echo "HAVE_FSXATTR=y" >> $config_host_mak -fi if test "$xen" = "enabled" ; then echo "CONFIG_XEN_BACKEND=y" >> $config_host_mak echo "CONFIG_XEN_CTRL_INTERFACE_VERSION=$xen_ctrl_version" >> $config_host_mak echo "XEN_CFLAGS=$xen_cflags" >> $config_host_mak echo "XEN_LIBS=$xen_libs" >> $config_host_mak fi -if test "$linux_aio" = "yes" ; then - echo "CONFIG_LINUX_AIO=y" >> $config_host_mak -fi if test "$vhost_scsi" = "yes" ; then echo "CONFIG_VHOST_SCSI=y" >> $config_host_mak fi @@ -4662,9 +3579,6 @@ fi if test "$vhost_user_fs" = "yes" ; then echo "CONFIG_VHOST_USER_FS=y" >> $config_host_mak fi -if test "$iovec" = "yes" ; then - echo "CONFIG_IOVEC=y" >> $config_host_mak -fi if test "$membarrier" = "yes" ; then echo "CONFIG_MEMBARRIER=y" >> $config_host_mak fi @@ -4672,16 +3586,6 @@ if test "$tcg" = "enabled" -a "$tcg_interpreter" = "true" ; then echo "CONFIG_TCG_INTERPRETER=y" >> $config_host_mak fi -if test "$spice_protocol" = "yes" ; then - echo "CONFIG_SPICE_PROTOCOL=y" >> $config_host_mak - echo "SPICE_PROTOCOL_CFLAGS=$spice_protocol_cflags" >> $config_host_mak -fi -if test "$spice" = "yes" ; then - echo "CONFIG_SPICE=y" >> $config_host_mak - echo "SPICE_CFLAGS=$spice_cflags $spice_protocol_cflags" >> $config_host_mak - echo "SPICE_LIBS=$spice_libs" >> $config_host_mak -fi - if test "$opengl" = "yes" ; then echo "CONFIG_OPENGL=y" >> $config_host_mak echo "OPENGL_CFLAGS=$opengl_cflags" >> $config_host_mak @@ -4744,14 +3648,6 @@ if test "$cmpxchg128" = "yes" ; then echo "CONFIG_CMPXCHG128=y" >> $config_host_mak fi -if test "$atomic64" = "yes" ; then - echo "CONFIG_ATOMIC64=y" >> $config_host_mak -fi - -if test "$getauxval" = "yes" ; then - echo "CONFIG_GETAUXVAL=y" >> $config_host_mak -fi - if test "$libssh" = "yes" ; then echo "CONFIG_LIBSSH=y" >> $config_host_mak echo "LIBSSH_CFLAGS=$libssh_cflags" >> $config_host_mak @@ -4766,44 +3662,6 @@ if test "$tpm" = "yes"; then echo 'CONFIG_TPM=y' >> $config_host_mak fi -echo "TRACE_BACKENDS=$trace_backends" >> $config_host_mak -if have_backend "nop"; then - echo "CONFIG_TRACE_NOP=y" >> $config_host_mak -fi -if have_backend "simple"; then - echo "CONFIG_TRACE_SIMPLE=y" >> $config_host_mak - # Set the appropriate trace file. 
- trace_file="\"$trace_file-\" FMT_pid" -fi -if have_backend "log"; then - echo "CONFIG_TRACE_LOG=y" >> $config_host_mak -fi -if have_backend "ust"; then - echo "CONFIG_TRACE_UST=y" >> $config_host_mak - echo "LTTNG_UST_LIBS=$lttng_ust_libs" >> $config_host_mak -fi -if have_backend "dtrace"; then - echo "CONFIG_TRACE_DTRACE=y" >> $config_host_mak - if test "$trace_backend_stap" = "yes" ; then - echo "CONFIG_TRACE_SYSTEMTAP=y" >> $config_host_mak - fi -fi -if have_backend "ftrace"; then - if test "$linux" = "yes" ; then - echo "CONFIG_TRACE_FTRACE=y" >> $config_host_mak - else - feature_not_found "ftrace(trace backend)" "ftrace requires Linux" - fi -fi -if have_backend "syslog"; then - if test "$posix_syslog" = "yes" ; then - echo "CONFIG_TRACE_SYSLOG=y" >> $config_host_mak - else - feature_not_found "syslog(trace backend)" "syslog not available" - fi -fi -echo "CONFIG_TRACE_FILE=$trace_file" >> $config_host_mak - if test "$rdma" = "yes" ; then echo "CONFIG_RDMA=y" >> $config_host_mak echo "RDMA_LIBS=$rdma_libs" >> $config_host_mak @@ -4817,27 +3675,10 @@ if test "$replication" = "yes" ; then echo "CONFIG_REPLICATION=y" >> $config_host_mak fi -if test "$have_af_vsock" = "yes" ; then - echo "CONFIG_AF_VSOCK=y" >> $config_host_mak -fi - if test "$debug_mutex" = "yes" ; then echo "CONFIG_DEBUG_MUTEX=y" >> $config_host_mak fi -# Hold two types of flag: -# CONFIG_THREAD_SETNAME_BYTHREAD - we've got a way of setting the name on -# a thread we have a handle to -# CONFIG_PTHREAD_SETNAME_NP_W_TID - A way of doing it on a particular -# platform -if test "$pthread_setname_np_w_tid" = "yes" ; then - echo "CONFIG_THREAD_SETNAME_BYTHREAD=y" >> $config_host_mak - echo "CONFIG_PTHREAD_SETNAME_NP_W_TID=y" >> $config_host_mak -elif test "$pthread_setname_np_wo_tid" = "yes" ; then - echo "CONFIG_THREAD_SETNAME_BYTHREAD=y" >> $config_host_mak - echo "CONFIG_PTHREAD_SETNAME_NP_WO_TID=y" >> $config_host_mak -fi - if test "$bochs" = "yes" ; then echo "CONFIG_BOCHS=y" >> $config_host_mak fi @@ -4862,37 +3703,6 @@ fi if test "$parallels" = "yes" ; then echo "CONFIG_PARALLELS=y" >> $config_host_mak fi -if test "$have_mlockall" = "yes" ; then - echo "HAVE_MLOCKALL=y" >> $config_host_mak -fi -if test "$fuzzing" = "yes" ; then - # If LIB_FUZZING_ENGINE is set, assume we are running on OSS-Fuzz, and the - # needed CFLAGS have already been provided - if test -z "${LIB_FUZZING_ENGINE+xxx}" ; then - # Add CFLAGS to tell clang to add fuzzer-related instrumentation to all the - # compiled code. - QEMU_CFLAGS="$QEMU_CFLAGS -fsanitize=fuzzer-no-link" - # To build non-fuzzer binaries with --enable-fuzzing, link everything with - # fsanitize=fuzzer-no-link. Otherwise, the linker will be unable to bind - # the fuzzer-related callbacks added by instrumentation. - QEMU_LDFLAGS="$QEMU_LDFLAGS -fsanitize=fuzzer-no-link" - # For the actual fuzzer binaries, we need to link against the libfuzzer - # library. Provide the flags for doing this in FUZZ_EXE_LDFLAGS. The meson - # rule for the fuzzer adds these to the link_args. They need to be - # configurable, to support OSS-Fuzz - FUZZ_EXE_LDFLAGS="-fsanitize=fuzzer" - else - FUZZ_EXE_LDFLAGS="$LIB_FUZZING_ENGINE" - fi - - # Specify a filter to only instrument code that is directly related to - # virtual-devices. 
- if test "$have_clang_coverage_filter" = "yes" ; then - cp "$source_path/scripts/oss-fuzz/instrumentation-filter-template" \ - instrumentation-filter - QEMU_CFLAGS="$QEMU_CFLAGS -fsanitize-coverage-allowlist=instrumentation-filter" - fi -fi if test "$plugins" = "yes" ; then echo "CONFIG_PLUGIN=y" >> $config_host_mak @@ -4957,16 +3767,7 @@ echo "GLIB_LIBS=$glib_libs" >> $config_host_mak echo "QEMU_LDFLAGS=$QEMU_LDFLAGS" >> $config_host_mak echo "LD_I386_EMULATION=$ld_i386_emulation" >> $config_host_mak echo "EXESUF=$EXESUF" >> $config_host_mak -echo "HOST_DSOSUF=$HOST_DSOSUF" >> $config_host_mak echo "LIBS_QGA=$libs_qga" >> $config_host_mak -if test "$gcov" = "yes" ; then - echo "CONFIG_GCOV=y" >> $config_host_mak -fi - -if test "$fuzzing" != "no"; then - echo "CONFIG_FUZZ=y" >> $config_host_mak -fi -echo "FUZZ_EXE_LDFLAGS=$FUZZ_EXE_LDFLAGS" >> $config_host_mak if test "$rng_none" = "yes"; then echo "CONFIG_RNG_NONE=y" >> $config_host_mak @@ -5056,7 +3857,7 @@ LINKS="$LINKS pc-bios/s390-ccw/Makefile" LINKS="$LINKS roms/seabios/Makefile" LINKS="$LINKS pc-bios/qemu-icon.bmp" LINKS="$LINKS .gdbinit scripts" # scripts needed by relative path in .gdbinit -LINKS="$LINKS tests/acceptance tests/data" +LINKS="$LINKS tests/avocado tests/data" LINKS="$LINKS tests/qemu-iotests/check" LINKS="$LINKS python" LINKS="$LINKS contrib/plugins/Makefile " @@ -5111,9 +3912,14 @@ for rom in seabios; do echo "RANLIB=$ranlib" >> $config_mak done +config_mak=pc-bios/optionrom/config.mak +echo "# Automatically generated by configure - do not modify" > $config_mak +echo "TOPSRC_DIR=$source_path" >> $config_mak + if test "$skip_meson" = no; then cross="config-meson.cross.new" meson_quote() { + test $# = 0 && return echo "'$(echo $* | sed "s/ /','/g")'" } @@ -5128,14 +3934,14 @@ if test "$skip_meson" = no; then test -z "$cxx" && echo "link_language = 'c'" >> $cross echo "[built-in options]" >> $cross - echo "c_args = [${CFLAGS:+$(meson_quote $CFLAGS)}]" >> $cross - echo "cpp_args = [${CXXFLAGS:+$(meson_quote $CXXFLAGS)}]" >> $cross - echo "c_link_args = [${LDFLAGS:+$(meson_quote $LDFLAGS)}]" >> $cross - echo "cpp_link_args = [${LDFLAGS:+$(meson_quote $LDFLAGS)}]" >> $cross + echo "c_args = [$(meson_quote $CFLAGS $EXTRA_CFLAGS)]" >> $cross + echo "cpp_args = [$(meson_quote $CXXFLAGS $EXTRA_CXXFLAGS)]" >> $cross + echo "c_link_args = [$(meson_quote $CFLAGS $LDFLAGS $EXTRA_CFLAGS $EXTRA_LDFLAGS)]" >> $cross + echo "cpp_link_args = [$(meson_quote $CXXFLAGS $LDFLAGS $EXTRA_CXXFLAGS $EXTRA_LDFLAGS)]" >> $cross echo "[binaries]" >> $cross - echo "c = [$(meson_quote $cc)]" >> $cross - test -n "$cxx" && echo "cpp = [$(meson_quote $cxx)]" >> $cross - test -n "$objcc" && echo "objc = [$(meson_quote $objcc)]" >> $cross + echo "c = [$(meson_quote $cc $CPU_CFLAGS)]" >> $cross + test -n "$cxx" && echo "cpp = [$(meson_quote $cxx $CPU_CFLAGS)]" >> $cross + test -n "$objcc" && echo "objc = [$(meson_quote $objcc $CPU_CFLAGS)]" >> $cross echo "ar = [$(meson_quote $ar)]" >> $cross echo "nm = [$(meson_quote $nm)]" >> $cross echo "pkgconfig = [$(meson_quote $pkg_config_exe)]" >> $cross @@ -5183,11 +3989,8 @@ if test "$skip_meson" = no; then mv $cross config-meson.cross rm -rf meson-private meson-info meson-logs - unset staticpic - if ! 
version_ge "$($meson --version)" 0.56.0; then - staticpic=$(if test "$pie" = yes; then echo true; else echo false; fi) - fi - NINJA=$ninja $meson setup \ + run_meson() { + NINJA=$ninja $meson setup \ --prefix "$prefix" \ --libdir "$libdir" \ --libexecdir "$libexecdir" \ @@ -5198,40 +4001,26 @@ if test "$skip_meson" = no; then --sysconfdir "$sysconfdir" \ --localedir "$localedir" \ --localstatedir "$local_statedir" \ + -Daudio_drv_list=$audio_drv_list \ + -Ddefault_devices=$default_devices \ -Ddocdir="$docdir" \ -Dqemu_firmwarepath="$firmwarepath" \ -Dqemu_suffix="$qemu_suffix" \ + -Dsphinx_build="$sphinx_build" \ + -Dtrace_file="$trace_file" \ -Doptimization=$(if test "$debug" = yes; then echo 0; else echo 2; fi) \ -Ddebug=$(if test "$debug_info" = yes; then echo true; else echo false; fi) \ -Dwerror=$(if test "$werror" = yes; then echo true; else echo false; fi) \ -Dstrip=$(if test "$strip_opt" = yes; then echo true; else echo false; fi) \ -Db_pie=$(if test "$pie" = yes; then echo true; else echo false; fi) \ - ${staticpic:+-Db_staticpic=$staticpic} \ -Db_coverage=$(if test "$gcov" = yes; then echo true; else echo false; fi) \ - -Db_lto=$lto -Dcfi=$cfi -Dcfi_debug=$cfi_debug \ - -Dmalloc=$malloc -Dmalloc_trim=$malloc_trim -Dsparse=$sparse \ - -Dkvm=$kvm -Dhax=$hax -Dwhpx=$whpx -Dhvf=$hvf -Dnvmm=$nvmm \ - -Dxen=$xen -Dxen_pci_passthrough=$xen_pci_passthrough -Dtcg=$tcg \ - -Dcocoa=$cocoa -Dgtk=$gtk -Dmpath=$mpath -Dsdl=$sdl -Dsdl_image=$sdl_image \ - -Dlibusb=$libusb -Dsmartcard=$smartcard -Dusb_redir=$usb_redir -Dvte=$vte \ - -Dvnc=$vnc -Dvnc_sasl=$vnc_sasl -Dvnc_jpeg=$vnc_jpeg -Dvnc_png=$vnc_png \ - -Dgettext=$gettext -Dxkbcommon=$xkbcommon -Du2f=$u2f -Dvirtiofsd=$virtiofsd \ - -Dcapstone=$capstone -Dslirp=$slirp -Dfdt=$fdt -Dbrlapi=$brlapi \ - -Dcurl=$curl -Dglusterfs=$glusterfs -Dbzip2=$bzip2 -Dlibiscsi=$libiscsi \ - -Dlibnfs=$libnfs -Diconv=$iconv -Dcurses=$curses -Dlibudev=$libudev\ - -Drbd=$rbd -Dlzo=$lzo -Dsnappy=$snappy -Dlzfse=$lzfse -Dlibxml2=$libxml2 \ - -Dlibdaxctl=$libdaxctl -Dlibpmem=$libpmem -Dlinux_io_uring=$linux_io_uring \ - -Dgnutls=$gnutls -Dnettle=$nettle -Dgcrypt=$gcrypt -Dauth_pam=$auth_pam \ - -Dzstd=$zstd -Dseccomp=$seccomp -Dvirtfs=$virtfs -Dcap_ng=$cap_ng \ - -Dattr=$attr -Ddefault_devices=$default_devices -Dvirglrenderer=$virglrenderer \ - -Ddocs=$docs -Dsphinx_build=$sphinx_build -Dinstall_blobs=$blobs \ - -Dvhost_user_blk_server=$vhost_user_blk_server -Dmultiprocess=$multiprocess \ - -Dfuse=$fuse -Dfuse_lseek=$fuse_lseek -Dguest_agent_msi=$guest_agent_msi -Dbpf=$bpf\ + -Db_lto=$lto -Dcfi=$cfi -Dtcg=$tcg -Dxen=$xen \ + -Dcapstone=$capstone -Dfdt=$fdt -Dslirp=$slirp \ + $(test -n "${LIB_FUZZING_ENGINE+xxx}" && echo "-Dfuzzing_engine=$LIB_FUZZING_ENGINE") \ $(if test "$default_feature" = no; then echo "-Dauto_features=disabled"; fi) \ - -Dtcg_interpreter=$tcg_interpreter \ - $cross_arg \ - "$PWD" "$source_path" - + "$@" $cross_arg "$PWD" "$source_path" + } + eval run_meson $meson_options if test "$?" 
-ne 0 ; then error_exit "meson setup failed" fi @@ -5242,6 +4031,7 @@ else perl -i -ne ' s/^gettext = true$/gettext = auto/; s/^gettext = false$/gettext = disabled/; + /^b_staticpic/ && next; print;' meson-private/cmd_line.txt fi fi @@ -5292,9 +4082,12 @@ preserve_env AR preserve_env AS preserve_env CC preserve_env CPP +preserve_env CFLAGS preserve_env CXX +preserve_env CXXFLAGS preserve_env INSTALL preserve_env LD +preserve_env LDFLAGS preserve_env LD_LIBRARY_PATH preserve_env LIBTOOL preserve_env MAKE diff --git a/contrib/plugins/cache.c b/contrib/plugins/cache.c index a1e03ca882..b9226e7c40 100644 --- a/contrib/plugins/cache.c +++ b/contrib/plugins/cache.c @@ -11,6 +11,8 @@ #include +#define STRTOLL(x) g_ascii_strtoll(x, NULL, 10) + QEMU_PLUGIN_EXPORT int qemu_plugin_version = QEMU_PLUGIN_VERSION; static enum qemu_plugin_mem_rw rw = QEMU_PLUGIN_MEM_RW; @@ -82,8 +84,9 @@ typedef struct { char *disas_str; const char *symbol; uint64_t addr; - uint64_t dmisses; - uint64_t imisses; + uint64_t l1_dmisses; + uint64_t l1_imisses; + uint64_t l2_misses; } InsnData; void (*update_hit)(Cache *cache, int set, int blk); @@ -93,15 +96,22 @@ void (*metadata_init)(Cache *cache); void (*metadata_destroy)(Cache *cache); static int cores; -static Cache **dcaches, **icaches; +static Cache **l1_dcaches, **l1_icaches; -static GMutex *dcache_locks; -static GMutex *icache_locks; +static bool use_l2; +static Cache **l2_ucaches; -static uint64_t all_dmem_accesses; -static uint64_t all_imem_accesses; -static uint64_t all_imisses; -static uint64_t all_dmisses; +static GMutex *l1_dcache_locks; +static GMutex *l1_icache_locks; +static GMutex *l2_ucache_locks; + +static uint64_t l1_dmem_accesses; +static uint64_t l1_imem_accesses; +static uint64_t l1_imisses; +static uint64_t l1_dmisses; + +static uint64_t l2_mem_accesses; +static uint64_t l2_misses; static int pow_of_two(int num) { @@ -382,6 +392,7 @@ static void vcpu_mem_access(unsigned int vcpu_index, qemu_plugin_meminfo_t info, struct qemu_plugin_hwaddr *hwaddr; int cache_idx; InsnData *insn; + bool hit_in_l1; hwaddr = qemu_plugin_get_hwaddr(info, vaddr); if (hwaddr && qemu_plugin_hwaddr_is_io(hwaddr)) { @@ -391,14 +402,29 @@ static void vcpu_mem_access(unsigned int vcpu_index, qemu_plugin_meminfo_t info, effective_addr = hwaddr ? 
qemu_plugin_hwaddr_phys_addr(hwaddr) : vaddr; cache_idx = vcpu_index % cores; - g_mutex_lock(&dcache_locks[cache_idx]); - if (!access_cache(dcaches[cache_idx], effective_addr)) { + g_mutex_lock(&l1_dcache_locks[cache_idx]); + hit_in_l1 = access_cache(l1_dcaches[cache_idx], effective_addr); + if (!hit_in_l1) { insn = (InsnData *) userdata; - __atomic_fetch_add(&insn->dmisses, 1, __ATOMIC_SEQ_CST); - dcaches[cache_idx]->misses++; + __atomic_fetch_add(&insn->l1_dmisses, 1, __ATOMIC_SEQ_CST); + l1_dcaches[cache_idx]->misses++; } - dcaches[cache_idx]->accesses++; - g_mutex_unlock(&dcache_locks[cache_idx]); + l1_dcaches[cache_idx]->accesses++; + g_mutex_unlock(&l1_dcache_locks[cache_idx]); + + if (hit_in_l1 || !use_l2) { + /* No need to access L2 */ + return; + } + + g_mutex_lock(&l2_ucache_locks[cache_idx]); + if (!access_cache(l2_ucaches[cache_idx], effective_addr)) { + insn = (InsnData *) userdata; + __atomic_fetch_add(&insn->l2_misses, 1, __ATOMIC_SEQ_CST); + l2_ucaches[cache_idx]->misses++; + } + l2_ucaches[cache_idx]->accesses++; + g_mutex_unlock(&l2_ucache_locks[cache_idx]); } static void vcpu_insn_exec(unsigned int vcpu_index, void *userdata) @@ -406,18 +432,34 @@ static void vcpu_insn_exec(unsigned int vcpu_index, void *userdata) uint64_t insn_addr; InsnData *insn; int cache_idx; + bool hit_in_l1; insn_addr = ((InsnData *) userdata)->addr; cache_idx = vcpu_index % cores; - g_mutex_lock(&icache_locks[cache_idx]); - if (!access_cache(icaches[cache_idx], insn_addr)) { + g_mutex_lock(&l1_icache_locks[cache_idx]); + hit_in_l1 = access_cache(l1_icaches[cache_idx], insn_addr); + if (!hit_in_l1) { insn = (InsnData *) userdata; - __atomic_fetch_add(&insn->imisses, 1, __ATOMIC_SEQ_CST); - icaches[cache_idx]->misses++; + __atomic_fetch_add(&insn->l1_imisses, 1, __ATOMIC_SEQ_CST); + l1_icaches[cache_idx]->misses++; } - icaches[cache_idx]->accesses++; - g_mutex_unlock(&icache_locks[cache_idx]); + l1_icaches[cache_idx]->accesses++; + g_mutex_unlock(&l1_icache_locks[cache_idx]); + + if (hit_in_l1 || !use_l2) { + /* No need to access L2 */ + return; + } + + g_mutex_lock(&l2_ucache_locks[cache_idx]); + if (!access_cache(l2_ucaches[cache_idx], insn_addr)) { + insn = (InsnData *) userdata; + __atomic_fetch_add(&insn->l2_misses, 1, __ATOMIC_SEQ_CST); + l2_ucaches[cache_idx]->misses++; + } + l2_ucaches[cache_idx]->accesses++; + g_mutex_unlock(&l2_ucache_locks[cache_idx]); } static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb) @@ -493,30 +535,34 @@ static void caches_free(Cache **caches) } } -static int dcmp(gconstpointer a, gconstpointer b) +static void append_stats_line(GString *line, uint64_t l1_daccess, + uint64_t l1_dmisses, uint64_t l1_iaccess, + uint64_t l1_imisses, uint64_t l2_access, + uint64_t l2_misses) { - InsnData *insn_a = (InsnData *) a; - InsnData *insn_b = (InsnData *) b; + double l1_dmiss_rate, l1_imiss_rate, l2_miss_rate; - return insn_a->dmisses < insn_b->dmisses ? 1 : -1; -} - -static void append_stats_line(GString *line, uint64_t daccess, uint64_t dmisses, - uint64_t iaccess, uint64_t imisses) -{ - double dmiss_rate, imiss_rate; - - dmiss_rate = ((double) dmisses) / (daccess) * 100.0; - imiss_rate = ((double) imisses) / (iaccess) * 100.0; + l1_dmiss_rate = ((double) l1_dmisses) / (l1_daccess) * 100.0; + l1_imiss_rate = ((double) l1_imisses) / (l1_iaccess) * 100.0; g_string_append_printf(line, "%-14lu %-12lu %9.4lf%% %-14lu %-12lu" - " %9.4lf%%\n", - daccess, - dmisses, - daccess ? dmiss_rate : 0.0, - iaccess, - imisses, - iaccess ? 
imiss_rate : 0.0); + " %9.4lf%%", + l1_daccess, + l1_dmisses, + l1_daccess ? l1_dmiss_rate : 0.0, + l1_iaccess, + l1_imisses, + l1_iaccess ? l1_imiss_rate : 0.0); + + if (use_l2) { + l2_miss_rate = ((double) l2_misses) / (l2_access) * 100.0; + g_string_append_printf(line, " %-12lu %-11lu %10.4lf%%", + l2_access, + l2_misses, + l2_access ? l2_miss_rate : 0.0); + } + + g_string_append(line, "\n"); } static void sum_stats(void) @@ -525,43 +571,74 @@ static void sum_stats(void) g_assert(cores > 1); for (i = 0; i < cores; i++) { - all_imisses += icaches[i]->misses; - all_dmisses += dcaches[i]->misses; - all_imem_accesses += icaches[i]->accesses; - all_dmem_accesses += dcaches[i]->accesses; + l1_imisses += l1_icaches[i]->misses; + l1_dmisses += l1_dcaches[i]->misses; + l1_imem_accesses += l1_icaches[i]->accesses; + l1_dmem_accesses += l1_dcaches[i]->accesses; + + if (use_l2) { + l2_misses += l2_ucaches[i]->misses; + l2_mem_accesses += l2_ucaches[i]->accesses; + } } } +static int dcmp(gconstpointer a, gconstpointer b) +{ + InsnData *insn_a = (InsnData *) a; + InsnData *insn_b = (InsnData *) b; + + return insn_a->l1_dmisses < insn_b->l1_dmisses ? 1 : -1; +} + static int icmp(gconstpointer a, gconstpointer b) { InsnData *insn_a = (InsnData *) a; InsnData *insn_b = (InsnData *) b; - return insn_a->imisses < insn_b->imisses ? 1 : -1; + return insn_a->l1_imisses < insn_b->l1_imisses ? 1 : -1; +} + +static int l2_cmp(gconstpointer a, gconstpointer b) +{ + InsnData *insn_a = (InsnData *) a; + InsnData *insn_b = (InsnData *) b; + + return insn_a->l2_misses < insn_b->l2_misses ? 1 : -1; } static void log_stats(void) { int i; - Cache *icache, *dcache; + Cache *icache, *dcache, *l2_cache; g_autoptr(GString) rep = g_string_new("core #, data accesses, data misses," " dmiss rate, insn accesses," - " insn misses, imiss rate\n"); + " insn misses, imiss rate"); + + if (use_l2) { + g_string_append(rep, ", l2 accesses, l2 misses, l2 miss rate"); + } + + g_string_append(rep, "\n"); for (i = 0; i < cores; i++) { g_string_append_printf(rep, "%-8d", i); - dcache = dcaches[i]; - icache = icaches[i]; + dcache = l1_dcaches[i]; + icache = l1_icaches[i]; + l2_cache = use_l2 ? l2_ucaches[i] : NULL; append_stats_line(rep, dcache->accesses, dcache->misses, - icache->accesses, icache->misses); + icache->accesses, icache->misses, + l2_cache ? l2_cache->accesses : 0, + l2_cache ? l2_cache->misses : 0); } if (cores > 1) { sum_stats(); g_string_append_printf(rep, "%-8s", "sum"); - append_stats_line(rep, all_dmem_accesses, all_dmisses, - all_imem_accesses, all_imisses); + append_stats_line(rep, l1_dmem_accesses, l1_dmisses, + l1_imem_accesses, l1_imisses, + l2_cache ? l2_mem_accesses : 0, l2_cache ? 
l2_misses : 0); } g_string_append(rep, "\n"); @@ -585,7 +662,7 @@ static void log_top_insns(void) if (insn->symbol) { g_string_append_printf(rep, " (%s)", insn->symbol); } - g_string_append_printf(rep, ", %ld, %s\n", insn->dmisses, + g_string_append_printf(rep, ", %ld, %s\n", insn->l1_dmisses, insn->disas_str); } @@ -598,10 +675,28 @@ static void log_top_insns(void) if (insn->symbol) { g_string_append_printf(rep, " (%s)", insn->symbol); } - g_string_append_printf(rep, ", %ld, %s\n", insn->imisses, + g_string_append_printf(rep, ", %ld, %s\n", insn->l1_imisses, insn->disas_str); } + if (!use_l2) { + goto finish; + } + + miss_insns = g_list_sort(miss_insns, l2_cmp); + g_string_append_printf(rep, "%s", "\naddress, L2 misses, instruction\n"); + + for (curr = miss_insns, i = 0; curr && i < limit; i++, curr = curr->next) { + insn = (InsnData *) curr->data; + g_string_append_printf(rep, "0x%" PRIx64, insn->addr); + if (insn->symbol) { + g_string_append_printf(rep, " (%s)", insn->symbol); + } + g_string_append_printf(rep, ", %ld, %s\n", insn->l2_misses, + insn->disas_str); + } + +finish: qemu_plugin_outs(rep->str); g_list_free(miss_insns); } @@ -611,8 +706,16 @@ static void plugin_exit(qemu_plugin_id_t id, void *p) log_stats(); log_top_insns(); - caches_free(dcaches); - caches_free(icaches); + caches_free(l1_dcaches); + caches_free(l1_icaches); + + g_free(l1_dcache_locks); + g_free(l1_icache_locks); + + if (use_l2) { + caches_free(l2_ucaches); + g_free(l2_ucache_locks); + } g_hash_table_destroy(miss_ht); } @@ -644,19 +747,24 @@ int qemu_plugin_install(qemu_plugin_id_t id, const qemu_info_t *info, int argc, char **argv) { int i; - int iassoc, iblksize, icachesize; - int dassoc, dblksize, dcachesize; + int l1_iassoc, l1_iblksize, l1_icachesize; + int l1_dassoc, l1_dblksize, l1_dcachesize; + int l2_assoc, l2_blksize, l2_cachesize; limit = 32; sys = info->system_emulation; - dassoc = 8; - dblksize = 64; - dcachesize = dblksize * dassoc * 32; + l1_dassoc = 8; + l1_dblksize = 64; + l1_dcachesize = l1_dblksize * l1_dassoc * 32; - iassoc = 8; - iblksize = 64; - icachesize = iblksize * iassoc * 32; + l1_iassoc = 8; + l1_iblksize = 64; + l1_icachesize = l1_iblksize * l1_iassoc * 32; + + l2_assoc = 16; + l2_blksize = 64; + l2_cachesize = l2_assoc * l2_blksize * 2048; policy = LRU; @@ -664,29 +772,44 @@ int qemu_plugin_install(qemu_plugin_id_t id, const qemu_info_t *info, for (i = 0; i < argc; i++) { char *opt = argv[i]; - if (g_str_has_prefix(opt, "iblksize=")) { - iblksize = g_ascii_strtoll(opt + 9, NULL, 10); - } else if (g_str_has_prefix(opt, "iassoc=")) { - iassoc = g_ascii_strtoll(opt + 7, NULL, 10); - } else if (g_str_has_prefix(opt, "icachesize=")) { - icachesize = g_ascii_strtoll(opt + 11, NULL, 10); - } else if (g_str_has_prefix(opt, "dblksize=")) { - dblksize = g_ascii_strtoll(opt + 9, NULL, 10); - } else if (g_str_has_prefix(opt, "dassoc=")) { - dassoc = g_ascii_strtoll(opt + 7, NULL, 10); - } else if (g_str_has_prefix(opt, "dcachesize=")) { - dcachesize = g_ascii_strtoll(opt + 11, NULL, 10); - } else if (g_str_has_prefix(opt, "limit=")) { - limit = g_ascii_strtoll(opt + 6, NULL, 10); - } else if (g_str_has_prefix(opt, "cores=")) { - cores = g_ascii_strtoll(opt + 6, NULL, 10); - } else if (g_str_has_prefix(opt, "evict=")) { - gchar *p = opt + 6; - if (g_strcmp0(p, "rand") == 0) { + g_autofree char **tokens = g_strsplit(opt, "=", 2); + + if (g_strcmp0(tokens[0], "iblksize") == 0) { + l1_iblksize = STRTOLL(tokens[1]); + } else if (g_strcmp0(tokens[0], "iassoc") == 0) { + l1_iassoc = 
STRTOLL(tokens[1]); + } else if (g_strcmp0(tokens[0], "icachesize") == 0) { + l1_icachesize = STRTOLL(tokens[1]); + } else if (g_strcmp0(tokens[0], "dblksize") == 0) { + l1_dblksize = STRTOLL(tokens[1]); + } else if (g_strcmp0(tokens[0], "dassoc") == 0) { + l1_dassoc = STRTOLL(tokens[1]); + } else if (g_strcmp0(tokens[0], "dcachesize") == 0) { + l1_dcachesize = STRTOLL(tokens[1]); + } else if (g_strcmp0(tokens[0], "limit") == 0) { + limit = STRTOLL(tokens[1]); + } else if (g_strcmp0(tokens[0], "cores") == 0) { + cores = STRTOLL(tokens[1]); + } else if (g_strcmp0(tokens[0], "l2cachesize") == 0) { + use_l2 = true; + l2_cachesize = STRTOLL(tokens[1]); + } else if (g_strcmp0(tokens[0], "l2blksize") == 0) { + use_l2 = true; + l2_blksize = STRTOLL(tokens[1]); + } else if (g_strcmp0(tokens[0], "l2assoc") == 0) { + use_l2 = true; + l2_assoc = STRTOLL(tokens[1]); + } else if (g_strcmp0(tokens[0], "l2") == 0) { + if (!qemu_plugin_bool_parse(tokens[0], tokens[1], &use_l2)) { + fprintf(stderr, "boolean argument parsing failed: %s\n", opt); + return -1; + } + } else if (g_strcmp0(tokens[0], "evict") == 0) { + if (g_strcmp0(tokens[1], "rand") == 0) { policy = RAND; - } else if (g_strcmp0(p, "lru") == 0) { + } else if (g_strcmp0(tokens[1], "lru") == 0) { policy = LRU; - } else if (g_strcmp0(p, "fifo") == 0) { + } else if (g_strcmp0(tokens[1], "fifo") == 0) { policy = FIFO; } else { fprintf(stderr, "invalid eviction policy: %s\n", opt); @@ -700,24 +823,33 @@ int qemu_plugin_install(qemu_plugin_id_t id, const qemu_info_t *info, policy_init(); - dcaches = caches_init(dblksize, dassoc, dcachesize); - if (!dcaches) { - const char *err = cache_config_error(dblksize, dassoc, dcachesize); + l1_dcaches = caches_init(l1_dblksize, l1_dassoc, l1_dcachesize); + if (!l1_dcaches) { + const char *err = cache_config_error(l1_dblksize, l1_dassoc, l1_dcachesize); fprintf(stderr, "dcache cannot be constructed from given parameters\n"); fprintf(stderr, "%s\n", err); return -1; } - icaches = caches_init(iblksize, iassoc, icachesize); - if (!icaches) { - const char *err = cache_config_error(iblksize, iassoc, icachesize); + l1_icaches = caches_init(l1_iblksize, l1_iassoc, l1_icachesize); + if (!l1_icaches) { + const char *err = cache_config_error(l1_iblksize, l1_iassoc, l1_icachesize); fprintf(stderr, "icache cannot be constructed from given parameters\n"); fprintf(stderr, "%s\n", err); return -1; } - dcache_locks = g_new0(GMutex, cores); - icache_locks = g_new0(GMutex, cores); + l2_ucaches = use_l2 ? caches_init(l2_blksize, l2_assoc, l2_cachesize) : NULL; + if (!l2_ucaches && use_l2) { + const char *err = cache_config_error(l2_blksize, l2_assoc, l2_cachesize); + fprintf(stderr, "L2 cache cannot be constructed from given parameters\n"); + fprintf(stderr, "%s\n", err); + return -1; + } + + l1_dcache_locks = g_new0(GMutex, cores); + l1_icache_locks = g_new0(GMutex, cores); + l2_ucache_locks = use_l2 ? g_new0(GMutex, cores) : NULL; qemu_plugin_register_vcpu_tb_trans_cb(id, vcpu_tb_trans); qemu_plugin_register_atexit_cb(id, plugin_exit, NULL); diff --git a/cpu.c b/cpu.c index 7dd96b7dbf..79ff2dd946 100644 --- a/cpu.c +++ b/cpu.c @@ -360,6 +360,27 @@ void cpu_exec_unrealizefn(CPUState *cpu) cpu_list_remove(cpu); } +static Property cpu_common_props[] = { +#ifndef CONFIG_USER_ONLY + /* + * Create a memory property for softmmu CPU object, + * so users can wire up its memory. (This can't go in hw/core/cpu.c + * because that file is compiled only once for both user-mode + * and system builds.) 
The default if no link is set up is to use + * the system address space. + */ + DEFINE_PROP_LINK("memory", CPUState, memory, TYPE_MEMORY_REGION, + MemoryRegion *), +#endif + DEFINE_PROP_BOOL("start-powered-off", CPUState, start_powered_off, false), + DEFINE_PROP_END_OF_LIST(), +}; + +void cpu_class_init_props(DeviceClass *dc) +{ + device_class_set_props(dc, cpu_common_props); +} + void cpu_exec_initfn(CPUState *cpu) { cpu->as = NULL; diff --git a/disas/nios2.c b/disas/nios2.c index c3e82140c7..98ac07d72e 100644 --- a/disas/nios2.c +++ b/disas/nios2.c @@ -3478,54 +3478,37 @@ nios2_disassemble (bfd_vma address, unsigned long opcode, instruction word at the address given, and prints the disassembled instruction on the stream info->stream using info->fprintf_func. */ -static int -print_insn_nios2 (bfd_vma address, disassemble_info *info, - enum bfd_endian endianness) +int print_insn_nios2(bfd_vma address, disassemble_info *info) { - bfd_byte buffer[INSNLEN]; - int status; + bfd_byte buffer[INSNLEN]; + int status; - status = (*info->read_memory_func) (address, buffer, INSNLEN, info); - if (status == 0) - { - unsigned long insn; - if (endianness == BFD_ENDIAN_BIG) - insn = (unsigned long) bfd_getb32 (buffer); - else - insn = (unsigned long) bfd_getl32 (buffer); - return nios2_disassemble (address, insn, info); + status = (*info->read_memory_func)(address, buffer, INSNLEN, info); + if (status == 0) { + unsigned long insn; + if (info->endian == BFD_ENDIAN_BIG) { + insn = (unsigned long) bfd_getb32(buffer); + } else { + insn = (unsigned long) bfd_getl32(buffer); + } + return nios2_disassemble(address, insn, info); } - /* We might have a 16-bit R2 instruction at the end of memory. Try that. */ - if (info->mach == bfd_mach_nios2r2) - { - status = (*info->read_memory_func) (address, buffer, 2, info); - if (status == 0) - { - unsigned long insn; - if (endianness == BFD_ENDIAN_BIG) - insn = (unsigned long) bfd_getb16 (buffer); - else - insn = (unsigned long) bfd_getl16 (buffer); - return nios2_disassemble (address, insn, info); - } + /* We might have a 16-bit R2 instruction at the end of memory. Try that. */ + if (info->mach == bfd_mach_nios2r2) { + status = (*info->read_memory_func)(address, buffer, 2, info); + if (status == 0) { + unsigned long insn; + if (info->endian == BFD_ENDIAN_BIG) { + insn = (unsigned long) bfd_getb16(buffer); + } else { + insn = (unsigned long) bfd_getl16(buffer); + } + return nios2_disassemble(address, insn, info); + } } - /* If we got here, we couldn't read anything. */ - (*info->memory_error_func) (status, address, info); - return -1; -} - -/* These two functions are the main entry points, accessed from - disassemble.c. */ -int -print_insn_big_nios2 (bfd_vma address, disassemble_info *info) -{ - return print_insn_nios2 (address, info, BFD_ENDIAN_BIG); -} - -int -print_insn_little_nios2 (bfd_vma address, disassemble_info *info) -{ - return print_insn_nios2 (address, info, BFD_ENDIAN_LITTLE); + /* If we got here, we couldn't read anything. 
*/ + (*info->memory_error_func)(status, address, info); + return -1; } diff --git a/disas/riscv.c b/disas/riscv.c index 278d9be924..793ad14c27 100644 --- a/disas/riscv.c +++ b/disas/riscv.c @@ -478,6 +478,49 @@ typedef enum { rv_op_fsflags = 316, rv_op_fsrmi = 317, rv_op_fsflagsi = 318, + rv_op_bseti = 319, + rv_op_bclri = 320, + rv_op_binvi = 321, + rv_op_bexti = 322, + rv_op_rori = 323, + rv_op_clz = 324, + rv_op_ctz = 325, + rv_op_cpop = 326, + rv_op_sext_h = 327, + rv_op_sext_b = 328, + rv_op_xnor = 329, + rv_op_orn = 330, + rv_op_andn = 331, + rv_op_rol = 332, + rv_op_ror = 333, + rv_op_sh1add = 334, + rv_op_sh2add = 335, + rv_op_sh3add = 336, + rv_op_sh1add_uw = 337, + rv_op_sh2add_uw = 338, + rv_op_sh3add_uw = 339, + rv_op_clmul = 340, + rv_op_clmulr = 341, + rv_op_clmulh = 342, + rv_op_min = 343, + rv_op_minu = 344, + rv_op_max = 345, + rv_op_maxu = 346, + rv_op_clzw = 347, + rv_op_ctzw = 348, + rv_op_cpopw = 349, + rv_op_slli_uw = 350, + rv_op_add_uw = 351, + rv_op_rolw = 352, + rv_op_rorw = 353, + rv_op_rev8 = 354, + rv_op_zext_h = 355, + rv_op_roriw = 356, + rv_op_orc_b = 357, + rv_op_bset = 358, + rv_op_bclr = 359, + rv_op_binv = 360, + rv_op_bext = 361, } rv_op; /* structures */ @@ -1117,6 +1160,49 @@ const rv_opcode_data opcode_data[] = { { "fsflags", rv_codec_i_csr, rv_fmt_rd_rs1, NULL, 0, 0, 0 }, { "fsrmi", rv_codec_i_csr, rv_fmt_rd_zimm, NULL, 0, 0, 0 }, { "fsflagsi", rv_codec_i_csr, rv_fmt_rd_zimm, NULL, 0, 0, 0 }, + { "bseti", rv_codec_i_sh7, rv_fmt_rd_rs1_imm, NULL, 0, 0, 0 }, + { "bclri", rv_codec_i_sh7, rv_fmt_rd_rs1_imm, NULL, 0, 0, 0 }, + { "binvi", rv_codec_i_sh7, rv_fmt_rd_rs1_imm, NULL, 0, 0, 0 }, + { "bexti", rv_codec_i_sh7, rv_fmt_rd_rs1_imm, NULL, 0, 0, 0 }, + { "rori", rv_codec_i_sh7, rv_fmt_rd_rs1_imm, NULL, 0, 0, 0 }, + { "clz", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0, 0 }, + { "ctz", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0, 0 }, + { "cpop", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0, 0 }, + { "sext.h", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0, 0 }, + { "sext.b", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0, 0 }, + { "xnor", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0, 0 }, + { "orn", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0, 0 }, + { "andn", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0, 0 }, + { "rol", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 }, + { "ror", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 }, + { "sh1add", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 }, + { "sh2add", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 }, + { "sh3add", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 }, + { "sh1add.uw", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 }, + { "sh2add.uw", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 }, + { "sh3add.uw", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 }, + { "clmul", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 }, + { "clmulr", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 }, + { "clmulh", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 }, + { "min", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 }, + { "minu", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 }, + { "max", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 }, + { "maxu", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 }, + { "clzw", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0, 0 }, + { "clzw", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0, 0 }, + { "cpopw", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0, 0 }, + { "slli.uw", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 }, + { "add.uw", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 }, + { "rolw", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 }, + { "rorw", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 
0, 0, 0 }, + { "rev8", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0, 0 }, + { "zext.h", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0, 0 }, + { "roriw", rv_codec_i_sh5, rv_fmt_rd_rs1_imm, NULL, 0, 0, 0 }, + { "orc.b", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0, 0 }, + { "bset", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 }, + { "bclr", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 }, + { "binv", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 }, + { "bext", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 }, }; /* CSR names */ @@ -1507,7 +1593,20 @@ static void decode_inst_opcode(rv_decode *dec, rv_isa isa) case 0: op = rv_op_addi; break; case 1: switch (((inst >> 27) & 0b11111)) { - case 0: op = rv_op_slli; break; + case 0b00000: op = rv_op_slli; break; + case 0b00101: op = rv_op_bseti; break; + case 0b01001: op = rv_op_bclri; break; + case 0b01101: op = rv_op_binvi; break; + case 0b01100: + switch (((inst >> 20) & 0b1111111)) { + case 0b0000000: op = rv_op_clz; break; + case 0b0000001: op = rv_op_ctz; break; + case 0b0000010: op = rv_op_cpop; break; + /* 0b0000011 */ + case 0b0000100: op = rv_op_sext_b; break; + case 0b0000101: op = rv_op_sext_h; break; + } + break; } break; case 2: op = rv_op_slti; break; @@ -1515,8 +1614,16 @@ static void decode_inst_opcode(rv_decode *dec, rv_isa isa) case 4: op = rv_op_xori; break; case 5: switch (((inst >> 27) & 0b11111)) { - case 0: op = rv_op_srli; break; - case 8: op = rv_op_srai; break; + case 0b00000: op = rv_op_srli; break; + case 0b00101: op = rv_op_orc_b; break; + case 0b01000: op = rv_op_srai; break; + case 0b01001: op = rv_op_bexti; break; + case 0b01100: op = rv_op_rori; break; + case 0b01101: + switch ((inst >> 20) & 0b1111111) { + case 0b0111000: op = rv_op_rev8; break; + } + break; } break; case 6: op = rv_op_ori; break; @@ -1530,12 +1637,21 @@ static void decode_inst_opcode(rv_decode *dec, rv_isa isa) case 1: switch (((inst >> 25) & 0b1111111)) { case 0: op = rv_op_slliw; break; + case 4: op = rv_op_slli_uw; break; + case 48: + switch ((inst >> 20) & 0b11111) { + case 0b00000: op = rv_op_clzw; break; + case 0b00001: op = rv_op_ctzw; break; + case 0b00010: op = rv_op_cpopw; break; + } + break; } break; case 5: switch (((inst >> 25) & 0b1111111)) { case 0: op = rv_op_srliw; break; case 32: op = rv_op_sraiw; break; + case 48: op = rv_op_roriw; break; } break; } @@ -1623,8 +1739,32 @@ static void decode_inst_opcode(rv_decode *dec, rv_isa isa) case 13: op = rv_op_divu; break; case 14: op = rv_op_rem; break; case 15: op = rv_op_remu; break; + case 36: + switch ((inst >> 20) & 0b11111) { + case 0: op = rv_op_zext_h; break; + } + break; + case 41: op = rv_op_clmul; break; + case 42: op = rv_op_clmulr; break; + case 43: op = rv_op_clmulh; break; + case 44: op = rv_op_min; break; + case 45: op = rv_op_minu; break; + case 46: op = rv_op_max; break; + case 47: op = rv_op_maxu; break; + case 130: op = rv_op_sh1add; break; + case 132: op = rv_op_sh2add; break; + case 134: op = rv_op_sh3add; break; + case 161: op = rv_op_bset; break; case 256: op = rv_op_sub; break; + case 260: op = rv_op_xnor; break; case 261: op = rv_op_sra; break; + case 262: op = rv_op_orn; break; + case 263: op = rv_op_andn; break; + case 289: op = rv_op_bclr; break; + case 293: op = rv_op_bext; break; + case 385: op = rv_op_rol; break; + case 386: op = rv_op_ror; break; + case 417: op = rv_op_binv; break; } break; case 13: op = rv_op_lui; break; @@ -1638,8 +1778,19 @@ static void decode_inst_opcode(rv_decode *dec, rv_isa isa) case 13: op = rv_op_divuw; break; case 14: op = rv_op_remw; break; case 15: op 
= rv_op_remuw; break; + case 32: op = rv_op_add_uw; break; + case 36: + switch ((inst >> 20) & 0b11111) { + case 0: op = rv_op_zext_h; break; + } + break; + case 130: op = rv_op_sh1add_uw; break; + case 132: op = rv_op_sh2add_uw; break; + case 134: op = rv_op_sh3add_uw; break; case 256: op = rv_op_subw; break; case 261: op = rv_op_sraw; break; + case 385: op = rv_op_rolw; break; + case 389: op = rv_op_rorw; break; } break; case 16: diff --git a/docs/about/build-platforms.rst b/docs/about/build-platforms.rst index bcb1549721..c29a4b8fe6 100644 --- a/docs/about/build-platforms.rst +++ b/docs/about/build-platforms.rst @@ -54,10 +54,12 @@ Those hosts are officially supported, with various accelerators: * - x86 - hax, hvf (64 bit only), kvm, nvmm, tcg, whpx (64 bit only), xen -Other host architectures are not supported. It is possible to build QEMU on an -unsupported host architecture using the configure ``--enable-tcg-interpreter`` -option to enable the experimental TCI support, but note that this is very slow -and is not recommended. +Other host architectures are not supported. It is possible to build QEMU system +emulation on an unsupported host architecture using the configure +``--enable-tcg-interpreter`` option to enable the TCI support, but note that +this is very slow and is not recommended for normal use. QEMU user emulation +requires host-specific support for signal handling, therefore TCI won't help +on unsupported host architectures. Non-supported architectures may be removed in the future following the :ref:`deprecation process`. diff --git a/docs/about/deprecated.rst b/docs/about/deprecated.rst index 3c2be84d80..600031210d 100644 --- a/docs/about/deprecated.rst +++ b/docs/about/deprecated.rst @@ -160,6 +160,26 @@ Use ``-display sdl`` instead. Use ``-display curses`` instead. +``-watchdog`` (since 6.2) +''''''''''''''''''''''''' + +Use ``-device`` instead. + +``-smp`` ("parameter=0" SMP configurations) (since 6.2) +''''''''''''''''''''''''''''''''''''''''''''''''''''''' + +Specified CPU topology parameters must be greater than zero. + +In the SMP configuration, users should either provide a CPU topology +parameter with a reasonable value (greater than zero) or just omit it +and QEMU will compute the missing value. + +However, historically it was implicitly allowed for users to provide +a parameter with zero value, which is meaningless and could also possibly +cause unexpected results in the -smp parsing. So support for this kind of +configurations (e.g. -smp 8,sockets=0) is deprecated since 6.2 and will +be removed in the near future, users have to ensure that all the topology +members described with -smp are greater than zero. Plugin argument passing through ``arg=`` (since 6.1) '''''''''''''''''''''''''''''''''''''''''''''''''''''''''''' @@ -213,6 +233,23 @@ Use the more generic commands ``block-export-add`` and ``block-export-del`` instead. As part of this deprecation, where ``nbd-server-add`` used a single ``bitmap``, the new ``block-export-add`` uses a list of ``bitmaps``. +``query-qmp-schema`` return value member ``values`` (since 6.2) +''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''' + +Member ``values`` in return value elements with meta-type ``enum`` is +deprecated. Use ``members`` instead. + +``drive-backup`` (since 6.2) +'''''''''''''''''''''''''''' + +Use ``blockdev-backup`` in combination with ``blockdev-add`` instead. +This change primarily separates the creation/opening process of the backup +target with explicit, separate steps. 
``blockdev-backup`` uses mostly the +same arguments as ``drive-backup``, except the ``format`` and ``mode`` +options are removed in favor of using explicit ``blockdev-create`` and +``blockdev-add`` calls. See :doc:`/interop/live-block-operations` for +details. + System accelerators ------------------- @@ -238,6 +275,16 @@ The ``I7200`` guest CPU relies on the nanoMIPS ISA, which is deprecated (the ISA has never been upstreamed to a compiler toolchain). Therefore this CPU is also deprecated. + +QEMU API (QAPI) events +---------------------- + +``MEM_UNPLUG_ERROR`` (since 6.2) +'''''''''''''''''''''''''''''''''''''''''''''''''''''''' + +Use the more generic event ``DEVICE_UNPLUG_GUEST_ERROR`` instead. + + System emulator machines ------------------------ @@ -282,6 +329,16 @@ full SCSI support. Use virtio-scsi instead when SCSI passthrough is required. Note this also applies to ``-device virtio-blk-pci,scsi=on|off``, which is an alias. +``-device sga`` (since 6.2) +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The ``sga`` device loads an option ROM for x86 targets which enables +SeaBIOS to send messages to the serial console. SeaBIOS 1.11.0 onwards +contains native support for this feature and thus use of the option +ROM approach is obsolete. The native SeaBIOS support can be activated +by using ``-machine graphics=off``. + + Block device options '''''''''''''''''''' @@ -324,9 +381,6 @@ The ``I7200`` guest CPU relies on the nanoMIPS ISA, which is deprecated (the ISA has never been upstreamed to a compiler toolchain). Therefore this CPU is also deprecated. -Related binaries ----------------- - Backwards compatibility ----------------------- diff --git a/docs/block-replication.txt b/docs/block-replication.txt index 108e9166a8..59eb2b33b3 100644 --- a/docs/block-replication.txt +++ b/docs/block-replication.txt @@ -79,7 +79,7 @@ Primary | || Secondary disk <--------- hidden-disk 5 <--------- || | | || | | || '-------------------------' - || drive-backup sync=none 6 + || blockdev-backup sync=none 6 1) The disk on the primary is represented by a block device with two children, providing replication between a primary disk and the host that @@ -101,7 +101,7 @@ should support bdrv_make_empty() and backing file. that is modified by the primary VM. It should also start as an empty disk, and the driver supports bdrv_make_empty() and backing file. -6) The drive-backup job (sync=none) is run to allow hidden-disk to buffer +6) The blockdev-backup job (sync=none) is run to allow hidden-disk to buffer any state that would otherwise be lost by the speculative write-through of the NBD server into the secondary disk. So before block replication, the primary disk and secondary disk should contain the same data. diff --git a/docs/conf.py b/docs/conf.py index ff6e92c6e2..763e7d2434 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -74,7 +74,7 @@ needs_sphinx = '1.6' extensions = ['kerneldoc', 'qmp_lexer', 'hxtool', 'depfile', 'qapidoc'] # Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] +templates_path = [os.path.join(qemu_docdir, '_templates')] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: @@ -85,6 +85,11 @@ source_suffix = '.rst' # The master toctree document. master_doc = 'index' +# Interpret `single-backticks` to be a cross-reference to any kind of +# referenceable object. Unresolvable or ambiguous references will emit a +# warning at build time. +default_role = 'any' + # General information about the project. 
project = u'QEMU' copyright = u'2021, The QEMU Project Developers' @@ -166,6 +171,7 @@ html_theme = 'sphinx_rtd_theme' if LooseVersion(sphinx_rtd_theme.__version__) >= LooseVersion("0.4.3"): html_theme_options = { "style_nav_header_background": "#802400", + "navigation_with_keys": True, } html_logo = os.path.join(qemu_docdir, "../ui/icons/qemu_128x128.png") @@ -181,6 +187,10 @@ html_css_files = [ 'theme_overrides.css', ] +html_js_files = [ + 'custom.js', +] + html_context = { "display_gitlab": True, "gitlab_user": "qemu-project", diff --git a/docs/devel/build-system.rst b/docs/devel/build-system.rst index 3baec158f2..7a83f5fc0d 100644 --- a/docs/devel/build-system.rst +++ b/docs/devel/build-system.rst @@ -42,73 +42,21 @@ perform a build: ../configure make -For now, checks on the compilation environment are found in configure -rather than meson.build, though this is expected to change. The command -line is parsed in the configure script and, whenever needed, converted -into the appropriate options to Meson. - -New checks should be added to Meson, which usually comprises the -following tasks: - - - Add a Meson build option to meson_options.txt. - - - Add support to the command line arg parser to handle any new - ``--enable-XXX``/``--disable-XXX`` flags required by the feature. - - - Add information to the help output message to report on the new - feature flag. - - - Add code to perform the actual feature check. - - - Add code to include the feature status in ``config-host.h`` - - - Add code to print out the feature status in the configure summary - upon completion. - - -Taking the probe for SDL2_Image as an example, we have the following pieces -in configure:: - - # Initial variable state - sdl_image=auto - - ..snip.. - - # Configure flag processing - --disable-sdl-image) sdl_image=disabled - ;; - --enable-sdl-image) sdl_image=enabled - ;; - - ..snip.. - - # Help output feature message - sdl-image SDL Image support for icons - - ..snip.. - - # Meson invocation - -Dsdl_image=$sdl_image - -In meson_options.txt:: - - option('sdl', type : 'feature', value : 'auto', - description: 'SDL Image support for icons') - -In meson.build:: - - # Detect dependency - sdl_image = dependency('SDL2_image', required: get_option('sdl_image'), - method: 'pkg-config', - kwargs: static_kwargs) - - # Create config-host.h (if applicable) - config_host_data.set('CONFIG_SDL_IMAGE', sdl_image.found()) - - # Summary - summary_info += {'SDL image support': sdl_image.found()} +The configure script automatically recognizes +command line options for which a same-named Meson option exists; +dashes in the command line are replaced with underscores. +Many checks on the compilation environment are still found in configure +rather than ``meson.build``, but new checks should be added directly to +``meson.build``. +Patches are also welcome to move existing checks from the configure +phase to ``meson.build``. When doing so, ensure that ``meson.build`` does +not use anymore the keys that you have removed from ``config-host.mak``. +Typically these will be replaced in ``meson.build`` by boolean variables, +``get_option('optname')`` invocations, or ``dep.found()`` expressions. +In general, the remaining checks have little or no interdependencies, +so they can be moved one by one. Helper functions ---------------- @@ -335,6 +283,60 @@ new target, or enabling new devices or hardware for a particular system/userspace emulation target +Adding checks +------------- + +New checks should be added to Meson. 
Compiler checks can be as simple as +the following:: + + config_host_data.set('HAVE_BTRFS_H', cc.has_header('linux/btrfs.h')) + +A more complex task such as adding a new dependency usually +comprises the following tasks: + + - Add a Meson build option to meson_options.txt. + + - Add code to perform the actual feature check. + + - Add code to include the feature status in ``config-host.h`` + + - Add code to print out the feature status in the configure summary + upon completion. + +Taking the probe for SDL2_Image as an example, we have the following +in ``meson_options.txt``:: + + option('sdl_image', type : 'feature', value : 'auto', + description: 'SDL Image support for icons') + +Unless the option was given a non-``auto`` value (on the configure +command line), the detection code must be performed only if the +dependency will be used:: + + sdl_image = not_found + if not get_option('sdl_image').auto() or have_system + sdl_image = dependency('SDL2_image', required: get_option('sdl_image'), + method: 'pkg-config', + static: enable_static) + endif + +This avoids warnings on static builds of user-mode emulators, for example. +Most of the libraries used by system-mode emulators are not available for +static linking. + +The other supporting code is generally simple:: + + # Create config-host.h (if applicable) + config_host_data.set('CONFIG_SDL_IMAGE', sdl_image.found()) + + # Summary + summary_info += {'SDL image support': sdl_image.found()} + +For the configure script to parse the new option, the +``scripts/meson-buildoptions.sh`` file must be up-to-date; ``make +update-buildoptions`` (or just ``make``) will take care of updating it. + + Support scripts --------------- @@ -380,6 +382,16 @@ phony target, while benchmarks are run with ``make bench``. Meson test suites such as ``unit`` can be ran with ``make check-unit`` too. It is also possible to run tests defined in meson.build with ``meson test``. +Useful make targets +------------------- + +``help`` + Print a help message for the most common build targets. + +``print-VAR`` + Print the value of the variable VAR. Useful for debugging the build + system. + Important files for the build system ==================================== @@ -404,7 +416,7 @@ number of dynamically created files listed later. ``tests/Makefile.include`` Rules for external test harnesses. These include the TCG tests, - ``qemu-iotests`` and the Avocado-based acceptance tests. + ``qemu-iotests`` and the Avocado-based integration tests. ``tests/docker/Makefile.include`` Rules for Docker tests. Like tests/Makefile, this file is included @@ -452,11 +464,10 @@ Built by Meson: scripts/make_device_config.sh program, feeding it the default-configs/$TARGET-NAME file as input. -``config-host.h``, ``$TARGET-NAME/config-target.h``, ``$TARGET-NAME/config-devices.h`` - These files are used by source code to determine what features - are enabled. They are generated from the contents of the corresponding - ``*.h`` files using the scripts/create_config program. This extracts - relevant variables and formats them as C preprocessor macros. +``config-host.h``, ``$TARGET_NAME-config-target.h``, ``$TARGET_NAME-config-devices.h`` + These files are used by source code to determine what features are + enabled. They are generated from the contents of the corresponding + ``*.mak`` files using Meson's ``configure_file()`` function. ``build.ninja`` The build rules. @@ -473,14 +484,3 @@ Built by Makefile: meson.build. 
The rules are produced from Meson's JSON description of tests (obtained with "meson introspect --tests") through the script scripts/mtest2make.py. - - -Useful make targets -------------------- - -``help`` - Print a help message for the most common build targets. - -``print-VAR`` - Print the value of the variable VAR. Useful for debugging the build - system. diff --git a/docs/devel/ci-definitions.rst b/docs/devel/ci-definitions.rst.inc similarity index 98% rename from docs/devel/ci-definitions.rst rename to docs/devel/ci-definitions.rst.inc index 32e22ff468..6d5c6fd9f2 100644 --- a/docs/devel/ci-definitions.rst +++ b/docs/devel/ci-definitions.rst.inc @@ -59,7 +59,7 @@ to system testing [5]_. Note that, in some cases, system testing may require interaction with third-party software, like operating system images, databases, networks, and so on. -On QEMU, system testing is represented by the 'check-acceptance' target from +On QEMU, system testing is represented by the 'check-avocado' target from 'make'. Flaky tests diff --git a/docs/devel/ci-jobs.rst b/docs/devel/ci-jobs.rst.inc similarity index 100% rename from docs/devel/ci-jobs.rst rename to docs/devel/ci-jobs.rst.inc diff --git a/docs/devel/ci-runners.rst b/docs/devel/ci-runners.rst.inc similarity index 100% rename from docs/devel/ci-runners.rst rename to docs/devel/ci-runners.rst.inc diff --git a/docs/devel/ci.rst b/docs/devel/ci.rst index 8d95247188..d106610096 100644 --- a/docs/devel/ci.rst +++ b/docs/devel/ci.rst @@ -8,6 +8,6 @@ found at:: https://wiki.qemu.org/Testing/CI -.. include:: ci-definitions.rst -.. include:: ci-jobs.rst -.. include:: ci-runners.rst +.. include:: ci-definitions.rst.inc +.. include:: ci-jobs.rst.inc +.. include:: ci-runners.rst.inc diff --git a/docs/devel/fuzzing.rst b/docs/devel/fuzzing.rst index 2749bb9bed..784ecb99e6 100644 --- a/docs/devel/fuzzing.rst +++ b/docs/devel/fuzzing.rst @@ -182,10 +182,11 @@ The output should contain a complete list of matched MemoryRegions. OSS-Fuzz -------- -QEMU is continuously fuzzed on `OSS-Fuzz` __(https://github.com/google/oss-fuzz). -By default, the OSS-Fuzz build will try to fuzz every fuzz-target. Since the -generic-fuzz target requires additional information provided in environment -variables, we pre-define some generic-fuzz configs in +QEMU is continuously fuzzed on `OSS-Fuzz +`_. By default, the OSS-Fuzz build +will try to fuzz every fuzz-target. Since the generic-fuzz target +requires additional information provided in environment variables, we +pre-define some generic-fuzz configs in ``tests/qtest/fuzz/generic_fuzz_configs.h``. Each config must specify: - ``.name``: To identify the fuzzer config diff --git a/docs/devel/index.rst b/docs/devel/index.rst index f95df10b3e..7c25177c5d 100644 --- a/docs/devel/index.rst +++ b/docs/devel/index.rst @@ -44,4 +44,4 @@ modifying QEMU's source code. ebpf_rss vfio-migration qapi-code-gen - writing-qmp-commands + writing-monitor-commands diff --git a/docs/devel/loads-stores.rst b/docs/devel/loads-stores.rst index 568274baec..8f0035c821 100644 --- a/docs/devel/loads-stores.rst +++ b/docs/devel/loads-stores.rst @@ -68,15 +68,19 @@ Regexes for git grep - ``\`` - ``\`` -``cpu_{ld,st}*_mmuidx_ra`` -~~~~~~~~~~~~~~~~~~~~~~~~~~ +``cpu_{ld,st}*_mmu`` +~~~~~~~~~~~~~~~~~~~~ -These functions operate on a guest virtual address plus a context, -known as a "mmu index" or ``mmuidx``, which controls how that virtual -address is translated. 
The meaning of the indexes are target specific, -but specifying a particular index might be necessary if, for instance, -the helper requires an "always as non-privileged" access rather that -the default access for the current state of the guest CPU. +These functions operate on a guest virtual address, plus a context +known as a "mmu index" which controls how that virtual address is +translated, plus a ``MemOp`` which contains alignment requirements +among other things. The ``MemOp`` and mmu index are combined into +a single argument of type ``MemOpIdx``. + +The meaning of the indexes are target specific, but specifying a +particular index might be necessary if, for instance, the helper +requires a "always as non-privileged" access rather than the +default access for the current state of the guest CPU. These functions may cause a guest CPU exception to be taken (e.g. for an alignment fault or MMU fault) which will result in @@ -99,6 +103,35 @@ function, which is a return address into the generated code [#gpc]_. Function names follow the pattern: +load: ``cpu_ld{size}{end}_mmu(env, ptr, oi, retaddr)`` + +store: ``cpu_st{size}{end}_mmu(env, ptr, val, oi, retaddr)`` + +``size`` + - ``b`` : 8 bits + - ``w`` : 16 bits + - ``l`` : 32 bits + - ``q`` : 64 bits + +``end`` + - (empty) : for target endian, or 8 bit sizes + - ``_be`` : big endian + - ``_le`` : little endian + +Regexes for git grep: + - ``\`` + - ``\`` + + +``cpu_{ld,st}*_mmuidx_ra`` +~~~~~~~~~~~~~~~~~~~~~~~~~~ + +These functions work like the ``cpu_{ld,st}_mmu`` functions except +that the ``mmuidx`` parameter is not combined with a ``MemOp``, +and therefore there is no required alignment supplied or enforced. + +Function names follow the pattern: + load: ``cpu_ld{sign}{size}{end}_mmuidx_ra(env, ptr, mmuidx, retaddr)`` store: ``cpu_st{size}{end}_mmuidx_ra(env, ptr, val, mmuidx, retaddr)`` @@ -132,7 +165,8 @@ of the guest CPU, as determined by ``cpu_mmu_index(env, false)``. These are generally the preferred way to do accesses by guest virtual address from helper functions, unless the access should -be performed with a context other than the default. +be performed with a context other than the default, or alignment +should be enforced for the access. Function names follow the pattern: diff --git a/docs/devel/multi-process.rst b/docs/devel/multi-process.rst index 69699329d6..e5758a79ab 100644 --- a/docs/devel/multi-process.rst +++ b/docs/devel/multi-process.rst @@ -1,15 +1,17 @@ -This is the design document for multi-process QEMU. It does not -necessarily reflect the status of the current implementation, which -may lack features or be considerably different from what is described -in this document. This document is still useful as a description of -the goals and general direction of this feature. - -Please refer to the following wiki for latest details: -https://wiki.qemu.org/Features/MultiProcessQEMU - Multi-process QEMU =================== +.. note:: + + This is the design document for multi-process QEMU. It does not + necessarily reflect the status of the current implementation, which + may lack features or be considerably different from what is described + in this document. This document is still useful as a description of + the goals and general direction of this feature. + + Please refer to the following wiki for latest details: + https://wiki.qemu.org/Features/MultiProcessQEMU + QEMU is often used as the hypervisor for virtual machines running in the Oracle cloud. 
Since one of the advantages of cloud computing is the ability to run many VMs from different tenants in the same cloud diff --git a/docs/devel/qapi-code-gen.rst b/docs/devel/qapi-code-gen.rst index b2569de486..a3b5473089 100644 --- a/docs/devel/qapi-code-gen.rst +++ b/docs/devel/qapi-code-gen.rst @@ -200,7 +200,9 @@ Syntax:: '*if': COND, '*features': FEATURES } ENUM-VALUE = STRING - | { 'name': STRING, '*if': COND } + | { 'name': STRING, + '*if': COND, + '*features': FEATURES } Member 'enum' names the enum type. @@ -706,8 +708,14 @@ QEMU shows a certain behaviour. Special features ~~~~~~~~~~~~~~~~ -Feature "deprecated" marks a command, event, or struct member as -deprecated. It is not supported elsewhere so far. +Feature "deprecated" marks a command, event, enum value, or struct +member as deprecated. It is not supported elsewhere so far. +Interfaces so marked may be withdrawn in future releases in accordance +with QEMU's deprecation policy. + +Feature "unstable" marks a command, event, enum value, or struct +member as unstable. It is not supported elsewhere so far. Interfaces +so marked may be withdrawn or changed incompatibly in future releases. Naming rules and reserved names @@ -742,9 +750,8 @@ Member name ``u`` and names starting with ``has-`` or ``has_`` are reserved for the generator, which uses them for unions and for tracking optional members. -Any name (command, event, type, member, or enum value) beginning with -``x-`` is marked experimental, and may be withdrawn or changed -incompatibly in a future release. +Names beginning with ``x-`` used to signify "experimental". This +convention has been replaced by special feature "unstable". Pragmas ``command-name-exceptions`` and ``member-name-exceptions`` let you violate naming rules. Use for new code is strongly discouraged. See @@ -949,15 +956,16 @@ definition must have documentation. Definition documentation starts with a line naming the definition, followed by an optional overview, a description of each argument (for commands and events), member (for structs and unions), branch (for -alternates), or value (for enums), and finally optional tagged -sections. +alternates), or value (for enums), a description of each feature (if +any), and finally optional tagged sections. -Descriptions of arguments can span multiple lines. The description -text can start on the line following the '\@argname:', in which case it -must not be indented at all. It can also start on the same line as -the '\@argname:'. In this case if it spans multiple lines then second -and subsequent lines must be indented to line up with the first -character of the first line of the description:: +The description of an argument or feature 'name' starts with +'\@name:'. The description text can start on the line following the +'\@name:', in which case it must not be indented at all. It can also +start on the same line as the '\@name:'. In this case if it spans +multiple lines then second and subsequent lines must be indented to +line up with the first character of the first line of the +description:: # @argone: # This is a two line description @@ -979,6 +987,12 @@ The number of spaces between the ':' and the text is not significant. Extensions added after the definition was first released carry a '(since x.y.z)' comment. +The feature descriptions must be preceded by a line "Features:", like +this:: + + # Features: + # @feature: Description text + A tagged section starts with one of the following words: "Note:"/"Notes:", "Since:", "Example"/"Examples", "Returns:", "TODO:". 
The section ends with the start of a new section. @@ -993,12 +1007,6 @@ multiline argument descriptions. A 'Since: x.y.z' tagged section lists the release that introduced the definition. -The text of a section can start on a new line, in -which case it must not be indented at all. It can also start -on the same line as the 'Note:', 'Returns:', etc tag. In this -case if it spans multiple lines then second and subsequent -lines must be indented to match the first. - An 'Example' or 'Examples' section is automatically rendered entirely as literal fixed-width text. In other sections, the text is formatted, and rST markup can be used. @@ -1157,7 +1165,8 @@ and "variants". "members" is a JSON array describing the object's common members, if any. Each element is a JSON object with members "name" (the member's -name), "type" (the name of its type), and optionally "default". The +name), "type" (the name of its type), "features" (a JSON array of +feature strings), and "default". The latter two are optional. The member is optional if "default" is present. Currently, "default" can only have value null. Other values are reserved for future extensions. The "members" array is in no particular order; clients @@ -1231,14 +1240,22 @@ Example: the SchemaInfo for ['str'] :: "element-type": "str" } The SchemaInfo for an enumeration type has meta-type "enum" and -variant member "values". The values are listed in no particular -order; clients must search the entire enum when learning whether a -particular value is supported. +variant member "members". + +"members" is a JSON array describing the enumeration values. Each +element is a JSON object with member "name" (the member's name), and +optionally "features" (a JSON array of feature strings). The +"members" array is in no particular order; clients must search the +entire array when learning whether a particular value is supported. Example: the SchemaInfo for MyEnum from section `Enumeration types`_ :: { "name": "MyEnum", "meta-type": "enum", - "values": [ "value1", "value2", "value3" ] } + "members": [ + { "name": "value1" }, + { "name": "value2" }, + { "name": "value3" } + ] } The SchemaInfo for a built-in type has the same name as the type in the QAPI schema (see section `Built-in Types`_), with one exception diff --git a/docs/devel/qgraph.rst b/docs/devel/qgraph.rst index c2882c3a33..db44d71002 100644 --- a/docs/devel/qgraph.rst +++ b/docs/devel/qgraph.rst @@ -1,8 +1,7 @@ .. _qgraph: -======================================== Qtest Driver Framework -======================================== +====================== In order to test a specific driver, plain libqos tests need to take care of booting QEMU with the right machine and devices. @@ -31,13 +30,15 @@ so the sdhci-test should only care of linking its qgraph node with that interface. In this way, if the command line of a sdhci driver is changed, only the respective qgraph driver node has to be adjusted. +QGraph concepts +--------------- + The graph is composed by nodes that represent machines, drivers, tests and edges that define the relationships between them (``CONSUMES``, ``PRODUCES``, and ``CONTAINS``). 
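To give a concrete flavour of these concepts before they are described in
detail, a driver node is registered and wired into the graph with a handful of
calls. This is only a sketch: ``my-driver`` and ``my-interface`` are made-up
names, ``qos_node_create_driver`` and ``qos_node_consumes`` are the same
registration functions used in the examples further below, and
``qos_node_produces`` is assumed here for the producing edge::

    /* Register the driver node, naming the callback that allocates it */
    qos_node_create_driver("my-driver", my_driver_create);

    /* my-driver only works when plugged into a PCI bus ... */
    qos_node_consumes("my-driver", "pci-bus", NULL);

    /* ... and it exposes an interface that tests can consume */
    qos_node_produces("my-driver", "my-interface");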
- Nodes -^^^^^^ +~~~~~ A node can be of four types: @@ -64,7 +65,7 @@ Notes for the nodes: drivers name, otherwise they won't be discovered Edges -^^^^^^ +~~~~~ An edge relation between two nodes (drivers or machines) ``X`` and ``Y`` can be: @@ -73,7 +74,7 @@ An edge relation between two nodes (drivers or machines) ``X`` and ``Y`` can be: - ``X CONTAINS Y``: ``Y`` is part of ``X`` component Execution steps -^^^^^^^^^^^^^^^ +~~~~~~~~~~~~~~~ The basic framework steps are the following: @@ -92,8 +93,64 @@ The basic framework steps are the following: Depending on the QEMU binary used, only some drivers/machines will be available and only test that are reached by them will be executed. +Command line +~~~~~~~~~~~~ + +Command line is built by using node names and optional arguments +passed by the user when building the edges. + +There are three types of command line arguments: + +- ``in node`` : created from the node name. For example, machines will + have ``-M `` to its command line, while devices + ``-device ``. It is automatically done by the framework. +- ``after node`` : added as additional argument to the node name. + This argument is added optionally when creating edges, + by setting the parameter ``after_cmd_line`` and + ``extra_edge_opts`` in ``QOSGraphEdgeOptions``. + The framework automatically adds + a comma before ``extra_edge_opts``, + because it is going to add attributes + after the destination node pointed by + the edge containing these options, and automatically + adds a space before ``after_cmd_line``, because it + adds an additional device, not an attribute. +- ``before node`` : added as additional argument to the node name. + This argument is added optionally when creating edges, + by setting the parameter ``before_cmd_line`` in + ``QOSGraphEdgeOptions``. This attribute + is going to add attributes before the destination node + pointed by the edge containing these options. It is + helpful to commands that are not node-representable, + such as ``-fdsev`` or ``-netdev``. + +While adding command line in edges is always used, not all nodes names are +used in every path walk: this is because the contained or produced ones +are already added by QEMU, so only nodes that "consumes" will be used to +build the command line. Also, nodes that will have ``{ "abstract" : true }`` +as QMP attribute will loose their command line, since they are not proper +devices to be added in QEMU. + +Example:: + + QOSGraphEdgeOptions opts = { + .before_cmd_line = "-drive id=drv0,if=none,file=null-co://," + "file.read-zeroes=on,format=raw", + .after_cmd_line = "-device scsi-hd,bus=vs0.0,drive=drv0", + + opts.extra_device_opts = "id=vs0"; + }; + + qos_node_create_driver("virtio-scsi-device", + virtio_scsi_device_create); + qos_node_consumes("virtio-scsi-device", "virtio-bus", &opts); + +Will produce the following command line: +``-drive id=drv0,if=none,file=null-co://, -device virtio-scsi-device,id=vs0 -device scsi-hd,bus=vs0.0,drive=drv0`` + Troubleshooting unavailable tests -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + If there is no path from an available machine to a test then that test will be unavailable and won't execute. This can happen if a test or driver did not set up its qgraph node correctly. It can also happen if the necessary machine type @@ -151,7 +208,7 @@ Typically this is because the QEMU binary lacks support for the necessary machine type or device. 
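One quick way to see which tests are actually reachable for a given binary is
to list them straight from the test executable. The path to ``qos-test`` and
the use of the glib test harness ``-l`` (list) option are assumptions here,
not something spelled out above::

    $ QTEST_QEMU_BINARY=./qemu-system-arm ./tests/qtest/qos-test -l

If the test you expect is missing from the listing, work through the
troubleshooting steps above to find the broken or missing qgraph node.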
Creating a new driver and its interface -""""""""""""""""""""""""""""""""""""""""" +--------------------------------------- Here we continue the ``sdhci`` use case, with the following scenario: @@ -489,7 +546,7 @@ or inverting the consumes edge in consumed_by:: arm/raspi2b --contains--> generic-sdhci Adding a new test -""""""""""""""""" +----------------- Given the above setup, adding a new test is very simple. ``sdhci-test``, taken from ``tests/qtest/sdhci-test.c``:: @@ -565,62 +622,7 @@ and for the binary ``QTEST_QEMU_BINARY=./qemu-system-arm``: Additional examples are also in ``test-qgraph.c`` -Command line: -"""""""""""""" - -Command line is built by using node names and optional arguments -passed by the user when building the edges. - -There are three types of command line arguments: - -- ``in node`` : created from the node name. For example, machines will - have ``-M `` to its command line, while devices - ``-device ``. It is automatically done by the framework. -- ``after node`` : added as additional argument to the node name. - This argument is added optionally when creating edges, - by setting the parameter ``after_cmd_line`` and - ``extra_edge_opts`` in ``QOSGraphEdgeOptions``. - The framework automatically adds - a comma before ``extra_edge_opts``, - because it is going to add attributes - after the destination node pointed by - the edge containing these options, and automatically - adds a space before ``after_cmd_line``, because it - adds an additional device, not an attribute. -- ``before node`` : added as additional argument to the node name. - This argument is added optionally when creating edges, - by setting the parameter ``before_cmd_line`` in - ``QOSGraphEdgeOptions``. This attribute - is going to add attributes before the destination node - pointed by the edge containing these options. It is - helpful to commands that are not node-representable, - such as ``-fdsev`` or ``-netdev``. - -While adding command line in edges is always used, not all nodes names are -used in every path walk: this is because the contained or produced ones -are already added by QEMU, so only nodes that "consumes" will be used to -build the command line. Also, nodes that will have ``{ "abstract" : true }`` -as QMP attribute will loose their command line, since they are not proper -devices to be added in QEMU. - -Example:: - - QOSGraphEdgeOptions opts = { - .before_cmd_line = "-drive id=drv0,if=none,file=null-co://," - "file.read-zeroes=on,format=raw", - .after_cmd_line = "-device scsi-hd,bus=vs0.0,drive=drv0", - - opts.extra_device_opts = "id=vs0"; - }; - - qos_node_create_driver("virtio-scsi-device", - virtio_scsi_device_create); - qos_node_consumes("virtio-scsi-device", "virtio-bus", &opts); - -Will produce the following command line: -``-drive id=drv0,if=none,file=null-co://, -device virtio-scsi-device,id=vs0 -device scsi-hd,bus=vs0.0,drive=drv0`` - Qgraph API reference -^^^^^^^^^^^^^^^^^^^^ +-------------------- .. kernel-doc:: tests/qtest/libqos/qgraph.h diff --git a/docs/devel/tcg-plugins.rst b/docs/devel/tcg-plugins.rst index dac5101a3c..f93ef4fe52 100644 --- a/docs/devel/tcg-plugins.rst +++ b/docs/devel/tcg-plugins.rst @@ -3,7 +3,6 @@ Copyright (c) 2019, Linaro Limited Written by Emilio Cota and Alex Bennée -================ QEMU TCG Plugins ================ @@ -16,60 +15,8 @@ only monitor it passively. However they can do this down to an individual instruction granularity including potentially subscribing to all load and store operations. 
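As a concrete starting point, a minimal plugin that does nothing more than
count translated blocks fits in a couple of dozen lines. This is only a
sketch (no arguments, no thread safety), not one of the in-tree examples::

    #include <inttypes.h>
    #include <glib.h>
    #include <qemu-plugin.h>

    QEMU_PLUGIN_EXPORT int qemu_plugin_version = QEMU_PLUGIN_VERSION;

    static uint64_t tb_count;

    /* Called every time a translation block is translated */
    static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
    {
        tb_count++; /* racy under MTTCG, good enough for a sketch */
    }

    /* Called once when QEMU exits */
    static void plugin_exit(qemu_plugin_id_t id, void *p)
    {
        g_autofree gchar *msg = g_strdup_printf("%" PRIu64 " blocks translated\n",
                                                tb_count);
        qemu_plugin_outs(msg);
    }

    QEMU_PLUGIN_EXPORT int qemu_plugin_install(qemu_plugin_id_t id,
                                               const qemu_info_t *info,
                                               int argc, char **argv)
    {
        qemu_plugin_register_vcpu_tb_trans_cb(id, vcpu_tb_trans);
        qemu_plugin_register_atexit_cb(id, plugin_exit, NULL);
        return 0;
    }

Built as a shared object (e.g. with ``-shared -fPIC`` and the glib compiler
flags), it can then be loaded with QEMU's ``-plugin`` option.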
-API Stability -============= - -This is a new feature for QEMU and it does allow people to develop -out-of-tree plugins that can be dynamically linked into a running QEMU -process. However the project reserves the right to change or break the -API should it need to do so. The best way to avoid this is to submit -your plugin upstream so they can be updated if/when the API changes. - -API versioning --------------- - -All plugins need to declare a symbol which exports the plugin API -version they were built against. This can be done simply by:: - - QEMU_PLUGIN_EXPORT int qemu_plugin_version = QEMU_PLUGIN_VERSION; - -The core code will refuse to load a plugin that doesn't export a -``qemu_plugin_version`` symbol or if plugin version is outside of QEMU's -supported range of API versions. - -Additionally the ``qemu_info_t`` structure which is passed to the -``qemu_plugin_install`` method of a plugin will detail the minimum and -current API versions supported by QEMU. The API version will be -incremented if new APIs are added. The minimum API version will be -incremented if existing APIs are changed or removed. - -Exposure of QEMU internals --------------------------- - -The plugin architecture actively avoids leaking implementation details -about how QEMU's translation works to the plugins. While there are -conceptions such as translation time and translation blocks the -details are opaque to plugins. The plugin is able to query select -details of instructions and system configuration only through the -exported *qemu_plugin* functions. - -Query Handle Lifetime ---------------------- - -Each callback provides an opaque anonymous information handle which -can usually be further queried to find out information about a -translation, instruction or operation. The handles themselves are only -valid during the lifetime of the callback so it is important that any -information that is needed is extracted during the callback and saved -by the plugin. - -API -=== - -.. kernel-doc:: include/qemu/qemu-plugin.h - Usage -===== +----- Any QEMU binary with TCG support has plugins enabled by default. Earlier releases needed to be explicitly enabled with:: @@ -87,8 +34,45 @@ Arguments are plugin specific and can be used to modify their behaviour. In this case the howvec plugin is being asked to use inline ops to count and break down the hint instructions by type. -Plugin Life cycle -================= +Writing plugins +--------------- + +API versioning +~~~~~~~~~~~~~~ + +This is a new feature for QEMU and it does allow people to develop +out-of-tree plugins that can be dynamically linked into a running QEMU +process. However the project reserves the right to change or break the +API should it need to do so. The best way to avoid this is to submit +your plugin upstream so they can be updated if/when the API changes. + +All plugins need to declare a symbol which exports the plugin API +version they were built against. This can be done simply by:: + + QEMU_PLUGIN_EXPORT int qemu_plugin_version = QEMU_PLUGIN_VERSION; + +The core code will refuse to load a plugin that doesn't export a +``qemu_plugin_version`` symbol or if plugin version is outside of QEMU's +supported range of API versions. + +Additionally the ``qemu_info_t`` structure which is passed to the +``qemu_plugin_install`` method of a plugin will detail the minimum and +current API versions supported by QEMU. The API version will be +incremented if new APIs are added. The minimum API version will be +incremented if existing APIs are changed or removed. 
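In practice a cautious plugin can also compare itself against the range
advertised in ``qemu_info_t`` at install time. The ``version.min`` and
``version.cur`` field names used below are an assumption (check
``include/qemu/qemu-plugin.h`` for the authoritative definition)::

    QEMU_PLUGIN_EXPORT int qemu_plugin_install(qemu_plugin_id_t id,
                                               const qemu_info_t *info,
                                               int argc, char **argv)
    {
        if (QEMU_PLUGIN_VERSION < info->version.min) {
            qemu_plugin_outs("plugin built against too old an API\n");
            return -1; /* a non-zero return is treated as install failure */
        }
        /* ... register callbacks as usual ... */
        return 0;
    }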
+ +Lifetime of the query handle +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Each callback provides an opaque anonymous information handle which +can usually be further queried to find out information about a +translation, instruction or operation. The handles themselves are only +valid during the lifetime of the callback so it is important that any +information that is needed is extracted during the callback and saved +by the plugin. + +Plugin life cycle +~~~~~~~~~~~~~~~~~ First the plugin is loaded and the public qemu_plugin_install function is called. The plugin will then register callbacks for various plugin @@ -111,11 +95,26 @@ callback which can then ensure atomicity itself. Finally when QEMU exits all the registered *atexit* callbacks are invoked. +Exposure of QEMU internals +~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The plugin architecture actively avoids leaking implementation details +about how QEMU's translation works to the plugins. While there are +conceptions such as translation time and translation blocks the +details are opaque to plugins. The plugin is able to query select +details of instructions and system configuration only through the +exported *qemu_plugin* functions. + +API +~~~ + +.. kernel-doc:: include/qemu/qemu-plugin.h + Internals -========= +--------- Locking -------- +~~~~~~~ We have to ensure we cannot deadlock, particularly under MTTCG. For this we acquire a lock when called from plugin code. We also keep the @@ -142,7 +141,7 @@ requested. The plugin isn't completely uninstalled until the safe work has executed while all vCPUs are quiescent. Example Plugins -=============== +--------------- There are a number of plugins included with QEMU and you are encouraged to contribute your own plugins plugins upstream. There is a @@ -212,7 +211,7 @@ The hotpages plugin can be configured using the following arguments: This is an instruction classifier so can be used to count different types of instructions. It has a number of options to refine which get -counted. You can give a value to the `count` argument for a class of +counted. You can give a value to the ``count`` argument for a class of instructions to break it down fully, so for example to see all the system registers accesses:: @@ -362,8 +361,9 @@ which will output an execution trace following this structure:: - contrib/plugins/cache.c -Cache modelling plugin that measures the performance of a given cache -configuration when a given working set is run:: +Cache modelling plugin that measures the performance of a given L1 cache +configuration, and optionally a unified L2 per-core cache when a given working +set is run:: qemu-x86_64 -plugin ./contrib/plugins/libcache.so \ -d plugin -D cache.log ./tests/tcg/x86_64-linux-user/float_convs @@ -421,3 +421,18 @@ The plugin has a number of arguments, all of them are optional: Sets the number of cores for which we maintain separate icache and dcache. (default: for linux-user, N = 1, for full system emulation: N = cores available to guest) + + * l2=on + + Simulates a unified L2 cache (stores blocks for both instructions and data) + using the default L2 configuration (cache size = 2MB, associativity = 16-way, + block size = 64B). + + * l2cachesize=N + * l2blksize=B + * l2assoc=A + + L2 cache configuration arguments. They specify the cache size, block size, and + associativity of the L2 cache, respectively. Setting any of the L2 + configuration arguments implies ``l2=on``. 
+ (default: N = 2097152 (2MB), B = 64, A = 16) diff --git a/docs/devel/testing.rst b/docs/devel/testing.rst index 64c9744795..60c59023e5 100644 --- a/docs/devel/testing.rst +++ b/docs/devel/testing.rst @@ -1,11 +1,10 @@ -=============== Testing in QEMU =============== This document describes the testing infrastructure in QEMU. Testing with "make check" -========================= +------------------------- The "make check" testing family includes most of the C based tests in QEMU. For a quick help, run ``make check-help`` from the source tree. @@ -24,7 +23,7 @@ expect the executables to exist and will fail with obscure messages if they cannot find them. Unit tests ----------- +~~~~~~~~~~ Unit tests, which can be invoked with ``make check-unit``, are simple C tests that typically link to individual QEMU object files and exercise them by @@ -67,7 +66,7 @@ and copy the actual command line which executes the unit test, then run it from the command line. QTest ------ +~~~~~ QTest is a device emulation testing framework. It can be very useful to test device models; it could also control certain aspects of QEMU (such as virtual @@ -81,7 +80,7 @@ QTest cases can be executed with make check-qtest QAPI schema tests ------------------ +~~~~~~~~~~~~~~~~~ The QAPI schema tests validate the QAPI parser used by QMP, by feeding predefined input to the parser and comparing the result with the reference @@ -108,33 +107,14 @@ parser (either fixing a bug or extending/modifying the syntax). To do this: ``qapi-schema += foo.json`` check-block ------------ +~~~~~~~~~~~ ``make check-block`` runs a subset of the block layer iotests (the tests that are in the "auto" group). See the "QEMU iotests" section below for more information. -GCC gcov support ----------------- - -``gcov`` is a GCC tool to analyze the testing coverage by -instrumenting the tested code. To use it, configure QEMU with -``--enable-gcov`` option and build. Then run ``make check`` as usual. - -If you want to gather coverage information on a single test the ``make -clean-gcda`` target can be used to delete any existing coverage -information before running a single test. - -You can generate a HTML coverage report by executing ``make -coverage-html`` which will create -``meson-logs/coveragereport/index.html``. - -Further analysis can be conducted by running the ``gcov`` command -directly on the various .gcda output files. Please read the ``gcov`` -documentation for more information. - QEMU iotests -============ +------------ QEMU iotests, under the directory ``tests/qemu-iotests``, is the testing framework widely used to test block layer related features. It is higher level @@ -171,7 +151,7 @@ More options are supported by the ``./check`` script, run ``./check -h`` for help. Writing a new test case ------------------------ +~~~~~~~~~~~~~~~~~~~~~~~ Consider writing a tests case when you are making any changes to the block layer. An iotest case is usually the choice for that. There are already many @@ -225,7 +205,8 @@ test failure. If using such devices are explicitly desired, consider adding ``locking=off`` option to disable image locking. Debugging a test case ------------------------ +~~~~~~~~~~~~~~~~~~~~~ + The following options to the ``check`` script can be useful when debugging a failing test: @@ -254,7 +235,7 @@ a failing test: ``$TEST_DIR/qemu-machine-``. Test case groups ----------------- +~~~~~~~~~~~~~~~~ "Tests may belong to one or more test groups, which are defined in the form of a comment in the test source file. 
By convention, test groups are listed @@ -304,10 +285,10 @@ Note that the following group names have a special meaning: .. _container-ref: Container based tests -===================== +--------------------- Introduction ------------- +~~~~~~~~~~~~ The container testing framework in QEMU utilizes public images to build and test QEMU in predefined and widely accessible Linux @@ -322,7 +303,7 @@ The container images are also used to augment the generation of tests for testing TCG. See :ref:`checktcg-ref` for more details. Docker Prerequisites --------------------- +~~~~~~~~~~~~~~~~~~~~ Install "docker" with the system package manager and start the Docker service on your development machine, then make sure you have the privilege to run @@ -353,7 +334,7 @@ exploit the whole host with Docker bind mounting or other privileged operations. So only do it on development machines. Podman Prerequisites --------------------- +~~~~~~~~~~~~~~~~~~~~ Install "podman" with the system package manager. @@ -365,7 +346,7 @@ Install "podman" with the system package manager. The last command should print an empty table, to verify the system is ready. Quickstart ----------- +~~~~~~~~~~ From source tree, type ``make docker-help`` to see the help. Testing can be started without configuring or building QEMU (``configure`` and @@ -381,7 +362,7 @@ is downloaded and initialized automatically), in which the ``test-build`` job is executed. Registry --------- +~~~~~~~~ The QEMU project has a container registry hosted by GitLab at ``registry.gitlab.com/qemu-project/qemu`` which will automatically be @@ -395,7 +376,7 @@ locally by using the ``NOCACHE`` build option: make docker-image-debian10 NOCACHE=1 Images ------- +~~~~~~ Along with many other images, the ``centos8`` image is defined in a Dockerfile in ``tests/docker/dockerfiles/``, called ``centos8.docker``. ``make docker-help`` @@ -410,7 +391,7 @@ mainly used to do necessary host side setup. One such setup is ``binfmt_misc``, for example, to make qemu-user powered cross build containers work. Tests ------ +~~~~~ Different tests are added to cover various configurations to build and test QEMU. Docker tests are the executables under ``tests/docker`` named @@ -421,7 +402,7 @@ source and build it. The full list of tests is printed in the ``make docker-help`` help. Debugging a Docker test failure -------------------------------- +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ When CI tasks, maintainers or yourself report a Docker test failure, follow the below steps to debug it: @@ -438,7 +419,7 @@ below steps to debug it: the prompt for debug. Options -------- +~~~~~~~ Various options can be used to affect how Docker tests are done. The full list is in the ``make docker`` help text. The frequently used ones are: @@ -452,7 +433,7 @@ list is in the ``make docker`` help text. The frequently used ones are: failure" section. Thread Sanitizer -================ +---------------- Thread Sanitizer (TSan) is a tool which can detect data races. QEMU supports building and testing with this tool. @@ -462,7 +443,7 @@ For more information on TSan: https://github.com/google/sanitizers/wiki/ThreadSanitizerCppManual Thread Sanitizer in Docker ---------------------------- +~~~~~~~~~~~~~~~~~~~~~~~~~~ TSan is currently supported in the ubuntu2004 docker. The test-tsan test will build using TSan and then run make check. @@ -477,7 +458,7 @@ We recommend using DEBUG=1 to allow launching the test from inside the docker, and to allow review of the warnings generated by TSan. 
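For example, assuming the usual ``make docker-TEST@IMAGE`` naming used by the
container tests (the exact target name is an assumption here), the TSan build
can be run in the container with debugging enabled via::

    make docker-test-tsan@ubuntu2004 DEBUG=1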
Building and Testing with TSan ------------------------------- +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ It is possible to build and test with TSan, with a few additional steps. These steps are normally done automatically in the docker. @@ -516,7 +497,7 @@ This allows for running the test and then checking the warnings afterwards. If you want TSan to stop and exit with error on warnings, use exitcode=66. TSan Suppressions ------------------ +~~~~~~~~~~~~~~~~~ Keep in mind that for any data race warning, although there might be a data race detected by TSan, there might be no actual bug here. TSan provides several different mechanisms for suppressing warnings. In general it is recommended @@ -542,7 +523,7 @@ More information on the file format can be found here under "Blacklist Format": https://github.com/google/sanitizers/wiki/ThreadSanitizerFlags TSan Annotations ----------------- +~~~~~~~~~~~~~~~~ include/qemu/tsan.h defines annotations. See this file for more descriptions of the annotations themselves. Annotations can be used to suppress TSan warnings or give TSan more information so that it can detect proper @@ -559,14 +540,14 @@ The full set of annotations can be found here: https://github.com/llvm/llvm-project/blob/master/compiler-rt/lib/tsan/rtl/tsan_interface_ann.cpp VM testing -========== +---------- This test suite contains scripts that bootstrap various guest images that have necessary packages to build QEMU. The basic usage is documented in ``Makefile`` help which is displayed with ``make vm-help``. Quickstart ----------- +~~~~~~~~~~ Run ``make vm-help`` to list available make targets. Invoke a specific make command to run build test in an image. For example, ``make vm-build-freebsd`` @@ -581,7 +562,7 @@ concerned about attackers taking control of the guest and potentially exploiting a QEMU security bug to compromise the host. QEMU binaries -------------- +~~~~~~~~~~~~~ By default, qemu-system-x86_64 is searched in $PATH to run the guest. If there isn't one, or if it is older than 2.10, the test won't work. In this case, @@ -590,20 +571,20 @@ provide the QEMU binary in env var: ``QEMU=/path/to/qemu-2.10+``. Likewise the path to qemu-img can be set in QEMU_IMG environment variable. Make jobs ---------- +~~~~~~~~~ The ``-j$X`` option in the make command line is not propagated into the VM, specify ``J=$X`` to control the make jobs in the guest. Debugging ---------- +~~~~~~~~~ Add ``DEBUG=1`` and/or ``V=1`` to the make command to allow interactive debugging and verbose output. If this is not enough, see the next section. ``V=1`` will be propagated down into the make jobs in the guest. Manual invocation ------------------ +~~~~~~~~~~~~~~~~~ Each guest script is an executable script with the same command line options. For example to work with the netbsd guest, use ``$QEMU_SRC/tests/vm/netbsd``: @@ -627,7 +608,7 @@ For example to work with the netbsd guest, use ``$QEMU_SRC/tests/vm/netbsd``: $ ./netbsd --interactive --image /var/tmp/netbsd.img sh Adding new guests ------------------ +~~~~~~~~~~~~~~~~~ Please look at existing guest scripts for how to add new guests. @@ -660,7 +641,7 @@ the script's ``main()``. recommended. Image fuzzer testing -==================== +-------------------- An image fuzzer was added to exercise format drivers. Currently only qcow2 is supported. To start the fuzzer, run @@ -672,17 +653,16 @@ supported. To start the fuzzer, run Alternatively, some command different from "qemu-img info" can be tested, by changing the ``-c`` option. 
-Acceptance tests using the Avocado Framework -============================================ +Integration tests using the Avocado Framework +--------------------------------------------- -The ``tests/acceptance`` directory hosts functional tests, also known -as acceptance level tests. They're usually higher level tests, and -may interact with external resources and with various guest operating -systems. +The ``tests/avocado`` directory hosts integration tests. They're usually +higher level tests, and may interact with external resources and with +various guest operating systems. These tests are written using the Avocado Testing Framework (which must be installed separately) in conjunction with a the ``avocado_qemu.Test`` -class, implemented at ``tests/acceptance/avocado_qemu``. +class, implemented at ``tests/avocado/avocado_qemu``. Tests based on ``avocado_qemu.Test`` can easily: @@ -712,13 +692,13 @@ Tests based on ``avocado_qemu.Test`` can easily: - http://avocado-framework.readthedocs.io/en/latest/api/utils/avocado.utils.html Running tests -------------- +~~~~~~~~~~~~~ -You can run the acceptance tests simply by executing: +You can run the avocado tests simply by executing: .. code:: - make check-acceptance + make check-avocado This involves the automatic creation of Python virtual environment within the build tree (at ``tests/venv``) which will have all the @@ -733,12 +713,12 @@ specific version, they may be on packages named ``python3-venv`` and ``python3-pip``. It is also possible to run tests based on tags using the -``make check-acceptance`` command and the ``AVOCADO_TAGS`` environment +``make check-avocado`` command and the ``AVOCADO_TAGS`` environment variable: .. code:: - make check-acceptance AVOCADO_TAGS=quick + make check-avocado AVOCADO_TAGS=quick Note that tags separated with commas have an AND behavior, while tags separated by spaces have an OR behavior. For more information on Avocado @@ -747,31 +727,31 @@ tags, see: https://avocado-framework.readthedocs.io/en/latest/guides/user/chapters/tags.html To run a single test file, a couple of them, or a test within a file -using the ``make check-acceptance`` command, set the ``AVOCADO_TESTS`` +using the ``make check-avocado`` command, set the ``AVOCADO_TESTS`` environment variable with the test files or test names. To run all tests from a single file, use: .. code:: - make check-acceptance AVOCADO_TESTS=$FILEPATH + make check-avocado AVOCADO_TESTS=$FILEPATH The same is valid to run tests from multiple test files: .. code:: - make check-acceptance AVOCADO_TESTS='$FILEPATH1 $FILEPATH2' + make check-avocado AVOCADO_TESTS='$FILEPATH1 $FILEPATH2' To run a single test within a file, use: .. code:: - make check-acceptance AVOCADO_TESTS=$FILEPATH:$TESTCLASS.$TESTNAME + make check-avocado AVOCADO_TESTS=$FILEPATH:$TESTCLASS.$TESTNAME The same is valid to run single tests from multiple test files: .. code:: - make check-acceptance AVOCADO_TESTS='$FILEPATH1:$TESTCLASS1.$TESTNAME1 $FILEPATH2:$TESTCLASS2.$TESTNAME2' + make check-avocado AVOCADO_TESTS='$FILEPATH1:$TESTCLASS1.$TESTNAME1 $FILEPATH2:$TESTCLASS2.$TESTNAME2' The scripts installed inside the virtual environment may be used without an "activation". For instance, the Avocado test runner @@ -779,9 +759,9 @@ may be invoked by running: .. 
code:: - tests/venv/bin/avocado run $OPTION1 $OPTION2 tests/acceptance/ + tests/venv/bin/avocado run $OPTION1 $OPTION2 tests/avocado/ -Note that if ``make check-acceptance`` was not executed before, it is +Note that if ``make check-avocado`` was not executed before, it is possible to create the Python virtual environment with the dependencies needed running: @@ -794,23 +774,23 @@ a test file. To run tests from a single file within the build tree, use: .. code:: - tests/venv/bin/avocado run tests/acceptance/$TESTFILE + tests/venv/bin/avocado run tests/avocado/$TESTFILE To run a single test within a test file, use: .. code:: - tests/venv/bin/avocado run tests/acceptance/$TESTFILE:$TESTCLASS.$TESTNAME + tests/venv/bin/avocado run tests/avocado/$TESTFILE:$TESTCLASS.$TESTNAME Valid test names are visible in the output from any previous execution -of Avocado or ``make check-acceptance``, and can also be queried using: +of Avocado or ``make check-avocado``, and can also be queried using: .. code:: - tests/venv/bin/avocado list tests/acceptance + tests/venv/bin/avocado list tests/avocado Manual Installation -------------------- +~~~~~~~~~~~~~~~~~~~ To manually install Avocado and its dependencies, run: @@ -823,18 +803,18 @@ Alternatively, follow the instructions on this link: https://avocado-framework.readthedocs.io/en/latest/guides/user/chapters/installing.html Overview --------- +~~~~~~~~ -The ``tests/acceptance/avocado_qemu`` directory provides the +The ``tests/avocado/avocado_qemu`` directory provides the ``avocado_qemu`` Python module, containing the ``avocado_qemu.Test`` class. Here's a simple usage example: .. code:: - from avocado_qemu import Test + from avocado_qemu import QemuSystemTest - class Version(Test): + class Version(QemuSystemTest): """ :avocado: tags=quick """ @@ -859,7 +839,7 @@ in the current directory, tagged as "quick", run: avocado run -t quick . The ``avocado_qemu.Test`` base test class ------------------------------------------ +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ The ``avocado_qemu.Test`` class has a number of characteristics that are worth being mentioned right away. @@ -879,10 +859,10 @@ and hypothetical example follows: .. code:: - from avocado_qemu import Test + from avocado_qemu import QemuSystemTest - class MultipleMachines(Test): + class MultipleMachines(QemuSystemTest): def test_multiple_machines(self): first_machine = self.get_vm() second_machine = self.get_vm() @@ -909,7 +889,7 @@ At test "tear down", ``avocado_qemu.Test`` handles all the QEMUMachines shutdown. The ``avocado_qemu.LinuxTest`` base test class -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ The ``avocado_qemu.LinuxTest`` is further specialization of the ``avocado_qemu.Test`` class, so it contains all the characteristics of @@ -932,7 +912,7 @@ like this: self.ssh_command('some_command_to_be_run_in_the_guest') Please refer to tests that use ``avocado_qemu.LinuxTest`` under -``tests/acceptance`` for more examples. +``tests/avocado`` for more examples. QEMUMachine ~~~~~~~~~~~ @@ -952,7 +932,7 @@ execution of a QEMU binary, giving its users: a more succinct and intuitive way QEMU binary selection -~~~~~~~~~~~~~~~~~~~~~ +^^^^^^^^^^^^^^^^^^^^^ The QEMU binary used for the ``self.vm`` QEMUMachine instance will primarily depend on the value of the ``qemu_bin`` parameter. If it's @@ -973,20 +953,23 @@ The resulting ``qemu_bin`` value will be preserved in the ``avocado_qemu.Test`` as an attribute with the same name. 
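For instance, to point the tests at a specific binary when invoking the
Avocado runner directly, the ``qemu_bin`` parameter can be supplied on the
command line. The ``-p`` test-parameter option is assumed here rather than
documented above::

    tests/venv/bin/avocado run -p qemu_bin=/path/to/qemu-system-x86_64 tests/avocado/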
Attribute reference -------------------- +~~~~~~~~~~~~~~~~~~~ + +Test +^^^^ Besides the attributes and methods that are part of the base ``avocado.Test`` class, the following attributes are available on any ``avocado_qemu.Test`` instance. vm -~~ +'' A QEMUMachine instance, initially configured according to the given ``qemu_bin`` parameter. arch -~~~~ +'''' The architecture can be used on different levels of the stack, e.g. by the framework or by the test itself. At the framework level, it will @@ -1003,7 +986,7 @@ name. If one is not given explicitly, it will either be set to ``:avocado: tags=arch:VALUE`` tag, it will be set to ``VALUE``. cpu -~~~ +''' The cpu model that will be set to all QEMUMachine instances created by the test. @@ -1014,7 +997,7 @@ name. If one is not given explicitly, it will either be set to ``:avocado: tags=cpu:VALUE`` tag, it will be set to ``VALUE``. machine -~~~~~~~ +''''''' The machine type that will be set to all QEMUMachine instances created by the test. @@ -1025,20 +1008,20 @@ name. If one is not given explicitly, it will either be set to ``:avocado: tags=machine:VALUE`` tag, it will be set to ``VALUE``. qemu_bin -~~~~~~~~ +'''''''' The preserved value of the ``qemu_bin`` parameter or the result of the dynamic probe for a QEMU binary in the current working directory or source tree. LinuxTest -~~~~~~~~~ +^^^^^^^^^ Besides the attributes present on the ``avocado_qemu.Test`` base class, the ``avocado_qemu.LinuxTest`` adds the following attributes: distro -...... +'''''' The name of the Linux distribution used as the guest image for the test. The name should match the **Provider** column on the list @@ -1047,7 +1030,7 @@ of images supported by the avocado.utils.vmimage library: https://avocado-framework.readthedocs.io/en/latest/guides/writer/libs/vmimage.html#supported-images distro_version -.............. +'''''''''''''' The version of the Linux distribution as the guest image for the test. The name should match the **Version** column on the list @@ -1056,7 +1039,7 @@ of images supported by the avocado.utils.vmimage library: https://avocado-framework.readthedocs.io/en/latest/guides/writer/libs/vmimage.html#supported-images distro_checksum -............... +''''''''''''''' The sha256 hash of the guest image file used for the test. @@ -1065,7 +1048,7 @@ same name), no validation on the integrity of the image will be performed. Parameter reference -------------------- +~~~~~~~~~~~~~~~~~~~ To understand how Avocado parameters are accessed by tests, and how they can be passed to tests, please refer to:: @@ -1079,8 +1062,11 @@ like the following: PARAMS (key=qemu_bin, path=*, default=./qemu-system-x86_64) => './qemu-system-x86_64 +Test +^^^^ + arch -~~~~ +'''' The architecture that will influence the selection of a QEMU binary (when one is not explicitly given). @@ -1093,31 +1079,30 @@ This parameter has a direct relation with the ``arch`` attribute. If not given, it will default to None. cpu -~~~ +''' The cpu model that will be set to all QEMUMachine instances created by the test. machine -~~~~~~~ +''''''' The machine type that will be set to all QEMUMachine instances created by the test. - qemu_bin -~~~~~~~~ +'''''''' The exact QEMU binary to be used on QEMUMachine. LinuxTest -~~~~~~~~~ +^^^^^^^^^ Besides the parameters present on the ``avocado_qemu.Test`` base class, the ``avocado_qemu.LinuxTest`` adds the following parameters: distro -...... +'''''' The name of the Linux distribution used as the guest image for the test. 
The name should match the **Provider** column on the list @@ -1126,7 +1111,7 @@ of images supported by the avocado.utils.vmimage library: https://avocado-framework.readthedocs.io/en/latest/guides/writer/libs/vmimage.html#supported-images distro_version -.............. +'''''''''''''' The version of the Linux distribution as the guest image for the test. The name should match the **Version** column on the list @@ -1135,7 +1120,7 @@ of images supported by the avocado.utils.vmimage library: https://avocado-framework.readthedocs.io/en/latest/guides/writer/libs/vmimage.html#supported-images distro_checksum -............... +''''''''''''''' The sha256 hash of the guest image file used for the test. @@ -1143,7 +1128,8 @@ If this value is not set in the code or by this parameter no validation on the integrity of the image will be performed. Skipping tests --------------- +~~~~~~~~~~~~~~ + The Avocado framework provides Python decorators which allow for easily skip tests running under certain conditions. For example, on the lack of a binary on the test system or when the running environment is a CI system. For further @@ -1158,7 +1144,7 @@ environment variables became a kind of standard way to enable/disable tests. Here is a list of the most used variables: AVOCADO_ALLOW_LARGE_STORAGE -~~~~~~~~~~~~~~~~~~~~~~~~~~~ +^^^^^^^^^^^^^^^^^^^^^^^^^^^ Tests which are going to fetch or produce assets considered *large* are not going to run unless that ``AVOCADO_ALLOW_LARGE_STORAGE=1`` is exported on the environment. @@ -1167,7 +1153,7 @@ The definition of *large* is a bit arbitrary here, but it usually means an asset which occupies at least 1GB of size on disk when uncompressed. AVOCADO_ALLOW_UNTRUSTED_CODE -~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +^^^^^^^^^^^^^^^^^^^^^^^^^^^^ There are tests which will boot a kernel image or firmware that can be considered not safe to run on the developer's workstation, thus they are skipped by default. The definition of *not safe* is also arbitrary but @@ -1178,7 +1164,7 @@ You should export ``AVOCADO_ALLOW_UNTRUSTED_CODE=1`` on the environment in order to allow tests which make use of those kind of assets. AVOCADO_TIMEOUT_EXPECTED -~~~~~~~~~~~~~~~~~~~~~~~~ +^^^^^^^^^^^^^^^^^^^^^^^^ The Avocado framework has a timeout mechanism which interrupts tests to avoid the test suite of getting stuck. The timeout value can be set via test parameter or property defined in the test class, for further details:: @@ -1192,7 +1178,7 @@ compiled with debug flags. Therefore, the ``AVOCADO_TIMEOUT_EXPECTED`` variable has been used to determine whether those tests should run or not. GITLAB_CI -~~~~~~~~~ +^^^^^^^^^ A number of tests are flagged to not run on the GitLab CI. Usually because they proved to the flaky or there are constraints on the CI environment which would make them fail. If you encounter a similar situation then use that @@ -1205,7 +1191,7 @@ variable as shown on the code snippet below to skip the test: do_something() Uninstalling Avocado --------------------- +~~~~~~~~~~~~~~~~~~~~ If you've followed the manual installation instructions above, you can easily uninstall Avocado. Start by listing the packages you have @@ -1217,13 +1203,13 @@ And remove any package you want with:: pip uninstall -If you've used ``make check-acceptance``, the Python virtual environment where +If you've used ``make check-avocado``, the Python virtual environment where Avocado is installed will be cleaned up as part of ``make check-clean``. .. 
_checktcg-ref: Testing with "make check-tcg" -============================= +----------------------------- The check-tcg tests are intended for simple smoke tests of both linux-user and softmmu TCG functionality. However to build test @@ -1256,7 +1242,7 @@ itself. See :ref:`container-ref` for more details. Running subset of tests ------------------------ +~~~~~~~~~~~~~~~~~~~~~~~ You can build the tests for one architecture:: @@ -1270,7 +1256,7 @@ Adding ``V=1`` to the invocation will show the details of how to invoke QEMU for the test which is useful for debugging tests. TCG test dependencies ---------------------- +~~~~~~~~~~~~~~~~~~~~~ The TCG tests are deliberately very light on dependencies and are either totally bare with minimal gcc lib support (for softmmu tests) @@ -1302,3 +1288,22 @@ exercise as many corner cases as possible. It is a useful test suite to run to exercise QEMU's linux-user code:: https://linux-test-project.github.io/ + +GCC gcov support +---------------- + +``gcov`` is a GCC tool to analyze the testing coverage by +instrumenting the tested code. To use it, configure QEMU with +``--enable-gcov`` option and build. Then run the tests as usual. + +If you want to gather coverage information on a single test the ``make +clean-gcda`` target can be used to delete any existing coverage +information before running a single test. + +You can generate a HTML coverage report by executing ``make +coverage-html`` which will create +``meson-logs/coveragereport/index.html``. + +Further analysis can be conducted by running the ``gcov`` command +directly on the various .gcda output files. Please read the ``gcov`` +documentation for more information. diff --git a/docs/devel/writing-qmp-commands.rst b/docs/devel/writing-monitor-commands.rst similarity index 75% rename from docs/devel/writing-qmp-commands.rst rename to docs/devel/writing-monitor-commands.rst index 6a10a06c48..b3e2c8481d 100644 --- a/docs/devel/writing-qmp-commands.rst +++ b/docs/devel/writing-monitor-commands.rst @@ -1,8 +1,8 @@ -How to write QMP commands using the QAPI framework -================================================== +How to write monitor commands +============================= This document is a step-by-step guide on how to write new QMP commands using -the QAPI framework. It also shows how to implement new style HMP commands. +the QAPI framework and HMP commands. This document doesn't discuss QMP protocol level details, nor does it dive into the QAPI framework implementation. @@ -11,6 +11,14 @@ For an in-depth introduction to the QAPI framework, please refer to docs/devel/qapi-code-gen.txt. For documentation about the QMP protocol, start with docs/interop/qmp-intro.txt. +New commands may be implemented in QMP only. New HMP commands should be +implemented on top of QMP. The typical HMP command wraps around an +equivalent QMP command, but HMP convenience commands built from QMP +building blocks are also fine. The long term goal is to make all +existing HMP commands conform to this, to fully isolate HMP from the +internals of QEMU. Refer to the `Writing a debugging aid returning +unstructured text`_ section for further guidance on commands that +would have traditionally been HMP only. Overview -------- @@ -85,8 +93,8 @@ any data". Now you're ready to enter the QMP example commands as explained in the following sections. 
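As a reminder of what such a session looks like, once QEMU is running with a
QMP monitor attached (for example via ``-qmp tcp:localhost:4444,server=on,wait=off``,
an assumed setup rather than part of the examples below), the very first
exchange negotiates capabilities before any other command is accepted::

    -> { "execute": "qmp_capabilities" }
    <- { "return": {} }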
-Writing a command that doesn't return data ------------------------------------------- +Writing a simple command: hello-world +------------------------------------- That's the most simple QMP command that can be written. Usually, this kind of command carries some meaningful action in QEMU but here it will just print @@ -293,9 +301,7 @@ Here's the implementation of the "hello-world" HMP command:: Error *err = NULL; qmp_hello_world(!!message, message, &err); - if (err) { - monitor_printf(mon, "%s\n", error_get_pretty(err)); - error_free(err); + if (hmp_handle_error(mon, err)) { return; } } @@ -307,9 +313,10 @@ There are three important points to be noticed: 1. The "mon" and "qdict" arguments are mandatory for all HMP functions. The former is the monitor object. The latter is how the monitor passes arguments entered by the user to the command implementation -2. hmp_hello_world() performs error checking. In this example we just print - the error description to the user, but we could do more, like taking - different actions depending on the error qmp_hello_world() returns +2. hmp_hello_world() performs error checking. In this example we just call + hmp_handle_error() which prints a message to the user, but we could do + more, like taking different actions depending on the error + qmp_hello_world() returns 3. The "err" variable must be initialized to NULL before performing the QMP call @@ -340,8 +347,8 @@ Please, check the "-monitor" command-line option to know how to open a user monitor. -Writing a command that returns data ------------------------------------ +Writing more complex commands +----------------------------- A QMP command is capable of returning any data the QAPI supports like integers, strings, booleans, enumerations and user defined types. @@ -350,6 +357,35 @@ In this section we will focus on user defined types. Please, check the QAPI documentation for information about the other types. +Modelling data in QAPI +~~~~~~~~~~~~~~~~~~~~~~ + +For a QMP command that to be considered stable and supported long term, +there is a requirement returned data should be explicitly modelled +using fine-grained QAPI types. As a general guide, a caller of the QMP +command should never need to parse individual returned data fields. If +a field appears to need parsing, then it should be split into separate +fields corresponding to each distinct data item. This should be the +common case for any new QMP command that is intended to be used by +machines, as opposed to exclusively human operators. + +Some QMP commands, however, are only intended as ad hoc debugging aids +for human operators. While they may return large amounts of formatted +data, it is not expected that machines will need to parse the result. +The overhead of defining a fine grained QAPI type for the data may not +be justified by the potential benefit. In such cases, it is permitted +to have a command return a simple string that contains formatted data, +however, it is mandatory for the command to use the 'x-' name prefix. +This indicates that the command is not guaranteed to be long term +stable / liable to change in future and is not following QAPI design +best practices. An example where this approach is taken is the QMP +command "x-query-registers". This returns a formatted dump of the +architecture specific CPU state. The way the data is formatted varies +across QEMU targets, is liable to change over time, and is only +intended to be consumed as an opaque string by machines. 
Refer to the +`Writing a debugging aid returning unstructured text`_ section for +an illustration. + User Defined Types ~~~~~~~~~~~~~~~~~~ @@ -466,9 +502,7 @@ Here's the HMP counterpart of the query-alarm-clock command:: Error *err = NULL; clock = qmp_query_alarm_clock(&err); - if (err) { - monitor_printf(mon, "Could not query alarm clock information\n"); - error_free(err); + if (hmp_handle_error(mon, err)) { return; } @@ -607,9 +641,7 @@ has to traverse the list, it's shown below for reference:: Error *err = NULL; method_list = qmp_query_alarm_methods(&err); - if (err) { - monitor_printf(mon, "Could not query alarm methods\n"); - error_free(err); + if (hmp_handle_error(mon, err)) { return; } @@ -620,3 +652,100 @@ has to traverse the list, it's shown below for reference:: qapi_free_TimerAlarmMethodList(method_list); } + +Writing a debugging aid returning unstructured text +--------------------------------------------------- + +As discussed in section `Modelling data in QAPI`_, it is required that +commands expecting machine usage be using fine-grained QAPI data types. +The exception to this rule applies when the command is solely intended +as a debugging aid and allows for returning unstructured text. This is +commonly needed for query commands that report aspects of QEMU's +internal state that are useful to human operators. + +In this example we will consider a simplified variant of the HMP +command ``info roms``. Following the earlier rules, this command will +need to live under the ``x-`` name prefix, so its QMP implementation +will be called ``x-query-roms``. It will have no parameters and will +return a single text string:: + + { 'struct': 'HumanReadableText', + 'data': { 'human-readable-text': 'str' } } + + { 'command': 'x-query-roms', + 'returns': 'HumanReadableText' } + +The ``HumanReadableText`` struct is intended to be used for all +commands, under the ``x-`` name prefix that are returning unstructured +text targetted at humans. It should never be used for commands outside +the ``x-`` name prefix, as those should be using structured QAPI types. + +Implementing the QMP command +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The QMP implementation will typically involve creating a ``GString`` +object and printing formatted data into it:: + + HumanReadableText *qmp_x_query_roms(Error **errp) + { + g_autoptr(GString) buf = g_string_new(""); + Rom *rom; + + QTAILQ_FOREACH(rom, &roms, next) { + g_string_append_printf("%s size=0x%06zx name=\"%s\"\n", + memory_region_name(rom->mr), + rom->romsize, + rom->name); + } + + return human_readable_text_from_str(buf); + } + + +Implementing the HMP command +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Now that the QMP command is in place, we can also make it available in +the human monitor (HMP) as shown in previous examples. The HMP +implementations will all look fairly similar, as all they need do is +invoke the QMP command and then print the resulting text or error +message. Here's the implementation of the "info roms" HMP command:: + + void hmp_info_roms(Monitor *mon, const QDict *qdict) + { + Error err = NULL; + g_autoptr(HumanReadableText) info = qmp_x_query_roms(&err); + + if (hmp_handle_error(mon, err)) { + return; + } + monitor_printf(mon, "%s", info->human_readable_text); + } + +Also, you have to add the function's prototype to the hmp.h file. 
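The prototype added to ``hmp.h`` simply mirrors the handler shown above::

    void hmp_info_roms(Monitor *mon, const QDict *qdict);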
+ +There's one last step to actually make the command available to +monitor users, we should add it to the hmp-commands-info.hx file:: + + { + .name = "roms", + .args_type = "", + .params = "", + .help = "show roms", + .cmd = hmp_info_roms, + }, + +The case of writing a HMP info handler that calls a no-parameter QMP query +command is quite common. To simplify the implementation there is a general +purpose HMP info handler for this scenario. All that is required to expose +a no-parameter QMP query command via HMP is to declare it using the +'.cmd_info_hrt' field to point to the QMP handler, and leave the '.cmd' +field NULL:: + + { + .name = "roms", + .args_type = "", + .params = "", + .help = "show roms", + .cmd_info_hrt = qmp_x_query_roms, + }, diff --git a/docs/hyperv.txt b/docs/hyperv.txt index 000638a2fd..5d99fd9a72 100644 --- a/docs/hyperv.txt +++ b/docs/hyperv.txt @@ -189,7 +189,15 @@ enabled. Requires: hv-vpindex, hv-synic, hv-time, hv-stimer -3.17. hv-no-nonarch-coresharing=on/off/auto +3.18. hv-avic (hv-apicv) +======================= +The enlightenment allows to use Hyper-V SynIC with hardware APICv/AVIC enabled. +Normally, Hyper-V SynIC disables these hardware feature and suggests the guest +to use paravirtualized AutoEOI feature. +Note: enabling this feature on old hardware (without APICv/AVIC support) may +have negative effect on guest's performace. + +3.19. hv-no-nonarch-coresharing=on/off/auto =========================================== This enlightenment tells guest OS that virtual processors will never share a physical core unless they are reported as sibling SMT threads. This information @@ -203,8 +211,25 @@ When the option is set to 'on' QEMU will always enable the feature, regardless of host setup. To keep guests secure, this can only be used in conjunction with exposing correct vCPU topology and vCPU pinning. -4. Development features -======================== +3.20. hv-version-id-{build,major,minor,spack,sbranch,snumber} +============================================================= +This changes Hyper-V version identification in CPUID 0x40000002.EAX-EDX from the +default (WS2016). +- hv-version-id-build sets 'Build Number' (32 bits) +- hv-version-id-major sets 'Major Version' (16 bits) +- hv-version-id-minor sets 'Minor Version' (16 bits) +- hv-version-id-spack sets 'Service Pack' (32 bits) +- hv-version-id-sbranch sets 'Service Branch' (8 bits) +- hv-version-id-snumber sets 'Service Number' (24 bits) + +Note: hv-version-id-* are not enlightenments and thus don't enable Hyper-V +identification when specified without any other enlightenments. + +4. Supplementary features +========================= + +4.1. hv-passthrough +=================== In some cases (e.g. during development) it may make sense to use QEMU in 'pass-through' mode and give Windows guests all enlightenments currently supported by KVM. This pass-through mode is enabled by "hv-passthrough" CPU @@ -215,8 +240,16 @@ values from KVM to QEMU. "hv-passthrough" overrides all other "hv-*" settings on the command line. Also, enabling this flag effectively prevents migration as the list of enabled enlightenments may differ between target and destination hosts. +4.2. hv-enforce-cpuid +===================== +By default, KVM allows the guest to use all currently supported Hyper-V +enlightenments when Hyper-V CPUID interface was exposed, regardless of if +some features were not announced in guest visible CPUIDs. 
'hv-enforce-cpuid' +feature alters this behavior and only allows the guest to use exposed Hyper-V +enlightenments. -4. Useful links + +5. Useful links ================ Hyper-V Top Level Functional specification and other information: https://github.com/MicrosoftDocs/Virtualization-Documentation diff --git a/docs/interop/bitmaps.rst b/docs/interop/bitmaps.rst index 059ad67929..1de46febdc 100644 --- a/docs/interop/bitmaps.rst +++ b/docs/interop/bitmaps.rst @@ -539,12 +539,11 @@ other partial disk images on top of a base image to reconstruct a full backup from the point in time at which the incremental backup was issued. The "Push Model" here references the fact that QEMU is "pushing" the modified -blocks out to a destination. We will be using the `drive-backup -`_ and `blockdev-backup -`_ QMP commands to create both +blocks out to a destination. We will be using the `blockdev-backup +`_ QMP command to create both full and incremental backups. -Both of these commands are jobs, which have their own QMP API for querying and +The command is a background job, which has its own QMP API for querying and management documented in `Background jobs `_. @@ -557,6 +556,10 @@ create a new incremental backup chain attached to a drive. This example creates a new, full backup of "drive0" and accompanies it with a new, empty bitmap that records writes from this point in time forward. +The target can be created with the help of `blockdev-add +`_ or `blockdev-create +`_ command. + .. note:: Any new writes that happen after this command is issued, even while the backup job runs, will be written locally and not to the backup destination. These writes will be recorded in the bitmap @@ -576,12 +579,11 @@ new, empty bitmap that records writes from this point in time forward. } }, { - "type": "drive-backup", + "type": "blockdev-backup", "data": { "device": "drive0", - "target": "/path/to/drive0.full.qcow2", - "sync": "full", - "format": "qcow2" + "target": "target0", + "sync": "full" } } ] @@ -664,12 +666,11 @@ use a transaction to reset the bitmap while making a new full backup: } }, { - "type": "drive-backup", + "type": "blockdev-backup", "data": { "device": "drive0", - "target": "/path/to/drive0.new_full.qcow2", - "sync": "full", - "format": "qcow2" + "target": "target0", + "sync": "full" } } ] @@ -728,19 +729,35 @@ Example: First Incremental Backup $ qemu-img create -f qcow2 drive0.inc0.qcow2 \ -b drive0.full.qcow2 -F qcow2 +#. Add target block node: + + .. code-block:: QMP + + -> { + "execute": "blockdev-add", + "arguments": { + "node-name": "target0", + "driver": "qcow2", + "file": { + "driver": "file", + "filename": "drive0.inc0.qcow2" + } + } + } + + <- { "return": {} } + #. Issue an incremental backup command: .. code-block:: QMP -> { - "execute": "drive-backup", + "execute": "blockdev-backup", "arguments": { "device": "drive0", "bitmap": "bitmap0", - "target": "drive0.inc0.qcow2", - "format": "qcow2", - "sync": "incremental", - "mode": "existing" + "target": "target0", + "sync": "incremental" } } @@ -785,20 +802,36 @@ Example: Second Incremental Backup $ qemu-img create -f qcow2 drive0.inc1.qcow2 \ -b drive0.inc0.qcow2 -F qcow2 +#. Add target block node: + + .. code-block:: QMP + + -> { + "execute": "blockdev-add", + "arguments": { + "node-name": "target0", + "driver": "qcow2", + "file": { + "driver": "file", + "filename": "drive0.inc1.qcow2" + } + } + } + + <- { "return": {} } + #. Issue a new incremental backup command. The only difference here is that we have changed the target image below. .. 
code-block:: QMP -> { - "execute": "drive-backup", + "execute": "blockdev-backup", "arguments": { "device": "drive0", "bitmap": "bitmap0", - "target": "drive0.inc1.qcow2", - "format": "qcow2", - "sync": "incremental", - "mode": "existing" + "target": "target0", + "sync": "incremental" } } @@ -866,20 +899,36 @@ image: file for you, but you lose control over format options like compatibility and preallocation presets. +#. Add target block node: + + .. code-block:: QMP + + -> { + "execute": "blockdev-add", + "arguments": { + "node-name": "target0", + "driver": "qcow2", + "file": { + "driver": "file", + "filename": "drive0.inc2.qcow2" + } + } + } + + <- { "return": {} } + #. Issue a new incremental backup command. Apart from the new destination image, there is no difference from the last two examples. .. code-block:: QMP -> { - "execute": "drive-backup", + "execute": "blockdev-backup", "arguments": { "device": "drive0", "bitmap": "bitmap0", - "target": "drive0.inc2.qcow2", - "format": "qcow2", - "sync": "incremental", - "mode": "existing" + "target": "target0", + "sync": "incremental" } } @@ -930,6 +979,38 @@ point in time. $ qemu-img create -f qcow2 drive0.full.qcow2 64G $ qemu-img create -f qcow2 drive1.full.qcow2 64G +#. Add target block nodes: + + .. code-block:: QMP + + -> { + "execute": "blockdev-add", + "arguments": { + "node-name": "target0", + "driver": "qcow2", + "file": { + "driver": "file", + "filename": "drive0.full.qcow2" + } + } + } + + <- { "return": {} } + + -> { + "execute": "blockdev-add", + "arguments": { + "node-name": "target1", + "driver": "qcow2", + "file": { + "driver": "file", + "filename": "drive1.full.qcow2" + } + } + } + + <- { "return": {} } + #. Create a full (anchor) backup for each drive, with accompanying bitmaps: .. code-block:: QMP @@ -953,21 +1034,19 @@ point in time. } }, { - "type": "drive-backup", + "type": "blockdev-backup", "data": { "device": "drive0", - "target": "/path/to/drive0.full.qcow2", - "sync": "full", - "format": "qcow2" + "target": "target0", + "sync": "full" } }, { - "type": "drive-backup", + "type": "blockdev-backup", "data": { "device": "drive1", - "target": "/path/to/drive1.full.qcow2", - "sync": "full", - "format": "qcow2" + "target": "target1", + "sync": "full" } } ] @@ -1016,6 +1095,38 @@ point in time. $ qemu-img create -f qcow2 drive1.inc0.qcow2 \ -b drive1.full.qcow2 -F qcow2 +#. Add target block nodes: + + .. code-block:: QMP + + -> { + "execute": "blockdev-add", + "arguments": { + "node-name": "target0", + "driver": "qcow2", + "file": { + "driver": "file", + "filename": "drive0.inc0.qcow2" + } + } + } + + <- { "return": {} } + + -> { + "execute": "blockdev-add", + "arguments": { + "node-name": "target1", + "driver": "qcow2", + "file": { + "driver": "file", + "filename": "drive1.inc0.qcow2" + } + } + } + + <- { "return": {} } + #. Issue a multi-drive incremental push backup transaction: .. code-block:: QMP @@ -1025,25 +1136,21 @@ point in time. "arguments": { "actions": [ { - "type": "drive-backup", + "type": "blockev-backup", "data": { "device": "drive0", "bitmap": "bitmap0", - "format": "qcow2", - "mode": "existing", "sync": "incremental", - "target": "drive0.inc0.qcow2" + "target": "target0" } }, { - "type": "drive-backup", + "type": "blockdev-backup", "data": { "device": "drive1", "bitmap": "bitmap0", - "format": "qcow2", - "mode": "existing", "sync": "incremental", - "target": "drive1.inc0.qcow2" + "target": "target1" } }, ] @@ -1119,19 +1226,35 @@ described above. 
This example demonstrates the single-job failure case: $ qemu-img create -f qcow2 drive0.inc0.qcow2 \ -b drive0.full.qcow2 -F qcow2 +#. Add target block node: + + .. code-block:: QMP + + -> { + "execute": "blockdev-add", + "arguments": { + "node-name": "target0", + "driver": "qcow2", + "file": { + "driver": "file", + "filename": "drive0.inc0.qcow2" + } + } + } + + <- { "return": {} } + #. Attempt to create an incremental backup via QMP: .. code-block:: QMP -> { - "execute": "drive-backup", + "execute": "blockdev-backup", "arguments": { "device": "drive0", "bitmap": "bitmap0", - "target": "drive0.inc0.qcow2", - "format": "qcow2", - "sync": "incremental", - "mode": "existing" + "target": "target0", + "sync": "incremental" } } @@ -1164,6 +1287,19 @@ described above. This example demonstrates the single-job failure case: "event": "BLOCK_JOB_COMPLETED" } +#. Remove target node: + + .. code-block:: QMP + + -> { + "execute": "blockdev-del", + "arguments": { + "node-name": "target0", + } + } + + <- { "return": {} } + #. Delete the failed image, and re-create it. .. code:: bash @@ -1172,20 +1308,36 @@ described above. This example demonstrates the single-job failure case: $ qemu-img create -f qcow2 drive0.inc0.qcow2 \ -b drive0.full.qcow2 -F qcow2 +#. Add target block node: + + .. code-block:: QMP + + -> { + "execute": "blockdev-add", + "arguments": { + "node-name": "target0", + "driver": "qcow2", + "file": { + "driver": "file", + "filename": "drive0.inc0.qcow2" + } + } + } + + <- { "return": {} } + #. Retry the command after fixing the underlying problem, such as freeing up space on the backup volume: .. code-block:: QMP -> { - "execute": "drive-backup", + "execute": "blockdev-backup", "arguments": { "device": "drive0", "bitmap": "bitmap0", - "target": "drive0.inc0.qcow2", - "format": "qcow2", - "sync": "incremental", - "mode": "existing" + "target": "target0", + "sync": "incremental" } } @@ -1210,7 +1362,8 @@ described above. This example demonstrates the single-job failure case: Example: Partial Transactional Failures ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -QMP commands like `drive-backup `_ +QMP commands like `blockdev-backup +`_ conceptually only start a job, and so transactions containing these commands may succeed even if the job it created later fails. This might have surprising interactions with notions of how a "transaction" ought to behave. 
@@ -1240,25 +1393,21 @@ and one succeeds: "arguments": { "actions": [ { - "type": "drive-backup", + "type": "blockdev-backup", "data": { "device": "drive0", "bitmap": "bitmap0", - "format": "qcow2", - "mode": "existing", "sync": "incremental", - "target": "drive0.inc0.qcow2" + "target": "target0" } }, { - "type": "drive-backup", + "type": "blockdev-backup", "data": { "device": "drive1", "bitmap": "bitmap0", - "format": "qcow2", - "mode": "existing", "sync": "incremental", - "target": "drive1.inc0.qcow2" + "target": "target1" } }] } @@ -1375,25 +1524,21 @@ applied: }, "actions": [ { - "type": "drive-backup", + "type": "blockdev-backup", "data": { "device": "drive0", "bitmap": "bitmap0", - "format": "qcow2", - "mode": "existing", "sync": "incremental", - "target": "drive0.inc0.qcow2" + "target": "target0" } }, { - "type": "drive-backup", + "type": "blockdev-backup", "data": { "device": "drive1", "bitmap": "bitmap0", - "format": "qcow2", - "mode": "existing", "sync": "incremental", - "target": "drive1.inc0.qcow2" + "target": "target1" } }] } diff --git a/docs/interop/live-block-operations.rst b/docs/interop/live-block-operations.rst index 9e3635b233..39e62c9915 100644 --- a/docs/interop/live-block-operations.rst +++ b/docs/interop/live-block-operations.rst @@ -116,8 +116,8 @@ QEMU block layer supports. (3) ``drive-mirror`` (and ``blockdev-mirror``): Synchronize a running disk to another image. -(4) ``drive-backup`` (and ``blockdev-backup``): Point-in-time (live) copy - of a block device to a destination. +(4) ``blockdev-backup`` (and the deprecated ``drive-backup``): + Point-in-time (live) copy of a block device to a destination. .. _`Interacting with a QEMU instance`: @@ -555,13 +555,14 @@ Currently, there are four different kinds: (3) ``none`` -- Synchronize only the new writes from this point on. - .. note:: In the case of ``drive-backup`` (or ``blockdev-backup``), - the behavior of ``none`` synchronization mode is different. - Normally, a ``backup`` job consists of two parts: Anything - that is overwritten by the guest is first copied out to - the backup, and in the background the whole image is - copied from start to end. With ``sync=none``, it's only - the first part. + .. note:: In the case of ``blockdev-backup`` (or deprecated + ``drive-backup``), the behavior of ``none`` + synchronization mode is different. Normally, a + ``backup`` job consists of two parts: Anything that is + overwritten by the guest is first copied out to the + backup, and in the background the whole image is copied + from start to end. With ``sync=none``, it's only the + first part. (4) ``incremental`` -- Synchronize content that is described by the dirty bitmap @@ -640,7 +641,7 @@ at this point: (QEMU) block-job-complete device=job0 In either of the above cases, if you once again run the -`query-block-jobs` command, there should not be any active block +``query-block-jobs`` command, there should not be any active block operation. 
Comparing 'commit' and 'mirror': In both then cases, the overlay images @@ -928,19 +929,22 @@ Shutdown the guest, by issuing the ``quit`` QMP command:: } -Live disk backup --- ``drive-backup`` and ``blockdev-backup`` -------------------------------------------------------------- +Live disk backup --- ``blockdev-backup`` and the deprecated``drive-backup`` +--------------------------------------------------------------------------- -The ``drive-backup`` (and its newer equivalent ``blockdev-backup``) allows +The ``blockdev-backup`` (and the deprecated ``drive-backup``) allows you to create a point-in-time snapshot. -In this case, the point-in-time is when you *start* the ``drive-backup`` -(or its newer equivalent ``blockdev-backup``) command. +In this case, the point-in-time is when you *start* the +``blockdev-backup`` (or deprecated ``drive-backup``) command. QMP invocation for ``drive-backup`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Note that ``drive-backup`` command is deprecated since QEMU 6.2 and +will be removed in future. + Yet again, starting afresh with our example disk image chain:: [A] <-- [B] <-- [C] <-- [D] @@ -965,11 +969,22 @@ will be issued, indicating the live block device job operation has completed, and no further action is required. +Moving from the deprecated ``drive-backup`` to newer ``blockdev-backup`` +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +``blockdev-backup`` differs from ``drive-backup`` in how you specify +the backup target. With ``blockdev-backup`` you can't specify filename +as a target. Instead you use ``node-name`` of existing block node, +which you may add by ``blockdev-add`` or ``blockdev-create`` commands. +Correspondingly, ``blockdev-backup`` doesn't have ``mode`` and +``format`` arguments which don't apply to an existing block node. See +following sections for details and examples. + + Notes on ``blockdev-backup`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -The ``blockdev-backup`` command is equivalent in functionality to -``drive-backup``, except that it operates at node-level in a Block Driver +The ``blockdev-backup`` command operates at node-level in a Block Driver State (BDS) graph. E.g. the sequence of actions to create a point-in-time backup diff --git a/docs/meson.build b/docs/meson.build index cffe1ecf1d..27c6e156ff 100644 --- a/docs/meson.build +++ b/docs/meson.build @@ -37,15 +37,6 @@ endif if build_docs SPHINX_ARGS += ['-Dversion=' + meson.project_version(), '-Drelease=' + config_host['PKGVERSION']] - sphinx_extn_depends = [ meson.source_root() / 'docs/sphinx/depfile.py', - meson.source_root() / 'docs/sphinx/hxtool.py', - meson.source_root() / 'docs/sphinx/kerneldoc.py', - meson.source_root() / 'docs/sphinx/kernellog.py', - meson.source_root() / 'docs/sphinx/qapidoc.py', - meson.source_root() / 'docs/sphinx/qmp_lexer.py', - qapi_gen_depends ] - sphinx_template_files = [ meson.source_root() / 'docs/_templates/footer.html' ] - have_ga = have_tools and config_host.has_key('CONFIG_GUEST_AGENT') man_pages = { @@ -57,7 +48,7 @@ if build_docs 'qemu-nbd.8': (have_tools ? 'man8' : ''), 'qemu-pr-helper.8': (have_tools ? 'man8' : ''), 'qemu-storage-daemon.1': (have_tools ? 'man1' : ''), - 'qemu-trace-stap.1': (config_host.has_key('CONFIG_TRACE_SYSTEMTAP') ? 'man1' : ''), + 'qemu-trace-stap.1': (stap.found() ? 'man1' : ''), 'virtfs-proxy-helper.1': (have_virtfs_proxy_helper ? 'man1' : ''), 'virtiofsd.1': (have_virtiofsd ? 
'man1' : ''), 'qemu.1': 'man1', @@ -77,7 +68,6 @@ if build_docs output: 'docs.stamp', input: files('conf.py'), depfile: 'docs.d', - depend_files: [ sphinx_extn_depends, sphinx_template_files ], command: [SPHINX_ARGS, '-Ddepfile=@DEPFILE@', '-Ddepfile_stamp=@OUTPUT0@', '-b', 'html', '-d', private_dir, diff --git a/docs/sphinx-static/custom.js b/docs/sphinx-static/custom.js new file mode 100644 index 0000000000..71a8605305 --- /dev/null +++ b/docs/sphinx-static/custom.js @@ -0,0 +1,9 @@ +document.addEventListener('keydown', (event) => { + // find a better way to look it up? + let search_input = document.getElementsByName('q')[0]; + + if (event.code === 'KeyS' && document.activeElement !== search_input) { + event.preventDefault(); + search_input.focus(); + } +}); diff --git a/docs/sphinx/depfile.py b/docs/sphinx/depfile.py index 277fdf0f56..afdcbcec6e 100644 --- a/docs/sphinx/depfile.py +++ b/docs/sphinx/depfile.py @@ -12,6 +12,8 @@ import os import sphinx +import sys +from pathlib import Path __version__ = '1.0' @@ -20,8 +22,21 @@ def get_infiles(env): yield env.doc2path(x) yield from ((os.path.join(env.srcdir, dep) for dep in env.dependencies[x])) + for mod in sys.modules.values(): + if hasattr(mod, '__file__'): + if mod.__file__: + yield mod.__file__ + # this is perhaps going to include unused files: + for static_path in env.config.html_static_path + env.config.templates_path: + for path in Path(static_path).rglob('*'): + yield str(path) -def write_depfile(app, env): + +def write_depfile(app, exception): + if exception: + return + + env = app.env if not env.config.depfile: return @@ -42,7 +57,7 @@ def write_depfile(app, env): def setup(app): app.add_config_value('depfile', None, 'env') app.add_config_value('depfile_stamp', None, 'env') - app.connect('env-updated', write_depfile) + app.connect('build-finished', write_depfile) return dict( version = __version__, diff --git a/docs/system/arm/orangepi.rst b/docs/system/arm/orangepi.rst index 6f23907fb6..c55694dd91 100644 --- a/docs/system/arm/orangepi.rst +++ b/docs/system/arm/orangepi.rst @@ -250,14 +250,14 @@ and set the following environment variables before booting: Optionally you may save the environment variables to SD card with 'saveenv'. To continue booting simply give the 'boot' command and NetBSD boots. -Orange Pi PC acceptance tests -""""""""""""""""""""""""""""" +Orange Pi PC integration tests +"""""""""""""""""""""""""""""" -The Orange Pi PC machine has several acceptance tests included. +The Orange Pi PC machine has several integration tests included. To run the whole set of tests, build QEMU from source and simply provide the following command: .. code-block:: bash $ AVOCADO_ALLOW_LARGE_STORAGE=yes avocado --show=app,console run \ - -t machine:orangepi-pc tests/acceptance/boot_linux_console.py + -t machine:orangepi-pc tests/avocado/boot_linux_console.py diff --git a/docs/system/arm/xlnx-versal-virt.rst b/docs/system/arm/xlnx-versal-virt.rst index 27f73500d9..92ad10d2da 100644 --- a/docs/system/arm/xlnx-versal-virt.rst +++ b/docs/system/arm/xlnx-versal-virt.rst @@ -32,6 +32,8 @@ Implemented devices: - OCM (256KB of On Chip Memory) - XRAM (4MB of on chip Accelerator RAM) - DDR memory +- BBRAM (36 bytes of Battery-backed RAM) +- eFUSE (3072 bytes of one-time field-programmable bit array) QEMU does not yet model any other devices, including the PL and the AI Engine. 
@@ -175,3 +177,50 @@ Run the following at the U-Boot prompt: fdt set /chosen/dom0 reg <0x00000000 0x40000000 0x0 0x03100000> booti 30000000 - 20000000 +BBRAM File Backend +"""""""""""""""""" +BBRAM can have an optional file backend, which must be a seekable +binary file with a size of 36 bytes or larger. A file with all +binary 0s is a 'blank'. + +To add a file-backend for the BBRAM: + +.. code-block:: bash + + -drive if=pflash,index=0,file=versal-bbram.bin,format=raw + +To use a different index value, N, from default of 0, add: + +.. code-block:: bash + + -global xlnx,bbram-ctrl.drive-index=N + +eFUSE File Backend +"""""""""""""""""" +eFUSE can have an optional file backend, which must be a seekable +binary file with a size of 3072 bytes or larger. A file with all +binary 0s is a 'blank'. + +To add a file-backend for the eFUSE: + +.. code-block:: bash + + -drive if=pflash,index=1,file=versal-efuse.bin,format=raw + +To use a different index value, N, from default of 1, add: + +.. code-block:: bash + + -global xlnx,efuse.drive-index=N + +.. warning:: + In actual physical Versal, BBRAM and eFUSE contain sensitive data. + The QEMU device models do **not** encrypt nor obfuscate any data + when holding them in models' memory or when writing them to their + file backends. + + Thus, a file backend should be used with caution, and 'format=luks' + is highly recommended (albeit with usage complexity). + + Better yet, do not use actual product data when running guest image + on this Xilinx Versal Virt board. diff --git a/docs/system/device-emulation.rst b/docs/system/device-emulation.rst index 7afcfd8064..19944f526c 100644 --- a/docs/system/device-emulation.rst +++ b/docs/system/device-emulation.rst @@ -88,3 +88,4 @@ Emulated Devices devices/usb.rst devices/vhost-user.rst devices/virtio-pmem.rst + devices/vhost-user-rng.rst diff --git a/docs/system/devices/vhost-user-rng.rst b/docs/system/devices/vhost-user-rng.rst new file mode 100644 index 0000000000..a145d4105c --- /dev/null +++ b/docs/system/devices/vhost-user-rng.rst @@ -0,0 +1,39 @@ +QEMU vhost-user-rng - RNG emulation +=================================== + +Background +---------- + +What follows builds on the material presented in vhost-user.rst - it should +be reviewed before moving forward with the content in this file. + +Description +----------- + +The vhost-user-rng device implementation was designed to work with a random +number generator daemon such as the one found in the vhost-device crate of +the rust-vmm project available on github [1]. + +[1]. https://github.com/rust-vmm/vhost-device + +Examples +-------- + +The daemon should be started first: + +:: + + host# vhost-device-rng --socket-path=rng.sock -c 1 -m 512 -p 1000 + +The QEMU invocation needs to create a chardev socket the device can +use to communicate as well as share the guests memory over a memfd. + +:: + + host# qemu-system \ + -chardev socket,path=$(PATH)/rng.sock,id=rng0 \ + -device vhost-user-rng-pci,chardev=rng0 \ + -m 4096 \ + -object memory-backend-file,id=mem,size=4G,mem-path=/dev/shm,share=on \ + -numa node,memdev=mem \ + ... diff --git a/docs/system/guest-loader.rst b/docs/system/guest-loader.rst index 4320d1183f..9ef9776bf0 100644 --- a/docs/system/guest-loader.rst +++ b/docs/system/guest-loader.rst @@ -51,4 +51,4 @@ The full syntax of the guest-loader is:: ``bootargs=`` This is an optional field for kernel blobs which will pass command - like via the `/chosen/module@/bootargs` node. + like via the ``/chosen/module@/bootargs`` node. 
diff --git a/docs/system/i386/kvm-pv.rst b/docs/system/i386/kvm-pv.rst new file mode 100644 index 0000000000..1e5a9923ef --- /dev/null +++ b/docs/system/i386/kvm-pv.rst @@ -0,0 +1,100 @@ +Paravirtualized KVM features +============================ + +Description +----------- + +In some cases when implementing hardware interfaces in software is slow, ``KVM`` +implements its own paravirtualized interfaces. + +Setup +----- + +Paravirtualized ``KVM`` features are represented as CPU flags. The following +features are enabled by default for any CPU model when ``KVM`` acceleration is +enabled: + +- ``kvmclock`` +- ``kvm-nopiodelay`` +- ``kvm-asyncpf`` +- ``kvm-steal-time`` +- ``kvm-pv-eoi`` +- ``kvmclock-stable-bit`` + +``kvm-msi-ext-dest-id`` feature is enabled by default in x2apic mode with split +irqchip (e.g. "-machine ...,kernel-irqchip=split -cpu ...,x2apic"). + +Note: when CPU model ``host`` is used, QEMU passes through all supported +paravirtualized ``KVM`` features to the guest. + +Existing features +----------------- + +``kvmclock`` + Expose a ``KVM`` specific paravirtualized clocksource to the guest. Supported + since Linux v2.6.26. + +``kvm-nopiodelay`` + The guest doesn't need to perform delays on PIO operations. Supported since + Linux v2.6.26. + +``kvm-mmu`` + This feature is deprecated. + +``kvm-asyncpf`` + Enable asynchronous page fault mechanism. Supported since Linux v2.6.38. + Note: since Linux v5.10 the feature is deprecated and not enabled by ``KVM``. + Use ``kvm-asyncpf-int`` instead. + +``kvm-steal-time`` + Enable stolen (when guest vCPU is not running) time accounting. Supported + since Linux v3.1. + +``kvm-pv-eoi`` + Enable paravirtualized end-of-interrupt signaling. Supported since Linux + v3.10. + +``kvm-pv-unhalt`` + Enable paravirtualized spinlocks support. Supported since Linux v3.12. + +``kvm-pv-tlb-flush`` + Enable paravirtualized TLB flush mechanism. Supported since Linux v4.16. + +``kvm-pv-ipi`` + Enable paravirtualized IPI mechanism. Supported since Linux v4.19. + +``kvm-poll-control`` + Enable host-side polling on HLT control from the guest. Supported since Linux + v5.10. + +``kvm-pv-sched-yield`` + Enable paravirtualized sched yield feature. Supported since Linux v5.10. + +``kvm-asyncpf-int`` + Enable interrupt based asynchronous page fault mechanism. Supported since Linux + v5.10. + +``kvm-msi-ext-dest-id`` + Support 'Extended Destination ID' for external interrupts. The feature allows + to use up to 32768 CPUs without IRQ remapping (but other limits may apply making + the number of supported vCPUs for a given configuration lower). Supported since + Linux v5.10. + +``kvmclock-stable-bit`` + Tell the guest that guest visible TSC value can be fully trusted for kvmclock + computations and no warps are expected. Supported since Linux v2.6.35. + +Supplementary features +---------------------- + +``kvm-pv-enforce-cpuid`` + Limit the supported paravirtualized feature set to the exposed features only. + Note, by default, ``KVM`` allows the guest to use all currently supported + paravirtualized features even when they were not announced in guest visible + CPUIDs. Supported since Linux v5.10. + + +Useful links +------------ + +Please refer to Documentation/virt/kvm in Linux for additional details. 
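The flags above are ultimately surfaced to the guest through the KVM paravirtualization CPUID leaves. As a rough, hypothetical guest-side sketch (this is not QEMU code; the leaf numbers and the ``kvmclock`` bit position are taken from the Linux KVM CPUID documentation and should be verified there), detecting ``kvmclock`` could look like::

    #include <cpuid.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        unsigned int eax, ebx, ecx, edx;
        char sig[13] = { 0 };

        /* Leaf 0x40000000: hypervisor signature, "KVMKVMKVM" on KVM */
        __cpuid(0x40000000, eax, ebx, ecx, edx);
        memcpy(sig, &ebx, 4);
        memcpy(sig + 4, &ecx, 4);
        memcpy(sig + 8, &edx, 4);
        if (strcmp(sig, "KVMKVMKVM") != 0) {
            printf("not running on KVM\n");
            return 0;
        }

        /* Leaf 0x40000001, EAX: paravirt feature bits; bit 0 is kvmclock */
        __cpuid(0x40000001, eax, ebx, ecx, edx);
        printf("kvmclock is %s\n", (eax & 1) ? "available" : "not available");
        return 0;
    }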
diff --git a/docs/system/i386/sgx.rst b/docs/system/i386/sgx.rst new file mode 100644 index 0000000000..9aa161af1a --- /dev/null +++ b/docs/system/i386/sgx.rst @@ -0,0 +1,165 @@ +Software Guard eXtensions (SGX) +=============================== + +Overview +-------- + +Intel Software Guard eXtensions (SGX) is a set of instructions and mechanisms +for memory accesses in order to provide security accesses for sensitive +applications and data. SGX allows an application to use it's pariticular +address space as an *enclave*, which is a protected area provides confidentiality +and integrity even in the presence of privileged malware. Accesses to the +enclave memory area from any software not resident in the enclave are prevented, +including those from privileged software. + +Virtual SGX +----------- + +SGX feature is exposed to guest via SGX CPUID. Looking at SGX CPUID, we can +report the same CPUID info to guest as on host for most of SGX CPUID. With +reporting the same CPUID guest is able to use full capacity of SGX, and KVM +doesn't need to emulate those info. + +The guest's EPC base and size are determined by Qemu, and KVM needs Qemu to +notify such info to it before it can initialize SGX for guest. + +Virtual EPC +~~~~~~~~~~~ + +By default, Qemu does not assign EPC to a VM, i.e. fully enabling SGX in a VM +requires explicit allocation of EPC to the VM. Similar to other specialized +memory types, e.g. hugetlbfs, EPC is exposed as a memory backend. + +SGX EPC is enumerated through CPUID, i.e. EPC "devices" need to be realized +prior to realizing the vCPUs themselves, which occurs long before generic +devices are parsed and realized. This limitation means that EPC does not +require -maxmem as EPC is not treated as {cold,hot}plugged memory. + +Qemu does not artificially restrict the number of EPC sections exposed to a +guest, e.g. Qemu will happily allow you to create 64 1M EPC sections. Be aware +that some kernels may not recognize all EPC sections, e.g. the Linux SGX driver +is hardwired to support only 8 EPC sections. + +The following Qemu snippet creates two EPC sections, with 64M pre-allocated +to the VM and an additional 28M mapped but not allocated:: + + -object memory-backend-epc,id=mem1,size=64M,prealloc=on \ + -object memory-backend-epc,id=mem2,size=28M \ + -M sgx-epc.0.memdev=mem1,sgx-epc.1.memdev=mem2 + +Note: + +The size and location of the virtual EPC are far less restricted compared +to physical EPC. Because physical EPC is protected via range registers, +the size of the physical EPC must be a power of two (though software sees +a subset of the full EPC, e.g. 92M or 128M) and the EPC must be naturally +aligned. KVM SGX's virtual EPC is purely a software construct and only +requires the size and location to be page aligned. Qemu enforces the EPC +size is a multiple of 4k and will ensure the base of the EPC is 4k aligned. +To simplify the implementation, EPC is always located above 4g in the guest +physical address space. + +Migration +~~~~~~~~~ + +Qemu/KVM doesn't prevent live migrating SGX VMs, although from hardware's +perspective, SGX doesn't support live migration, since both EPC and the SGX +key hierarchy are bound to the physical platform. However live migration +can be supported in the sense if guest software stack can support recreating +enclaves when it suffers sudden lose of EPC; and if guest enclaves can detect +SGX keys being changed, and handle gracefully. 
For instance, when ERESUME fails +with #PF.SGX, guest software can gracefully detect it and recreate enclaves; +and when enclave fails to unseal sensitive information from outside, it can +detect such error and sensitive information can be provisioned to it again. + +CPUID +~~~~~ + +Due to its myriad dependencies, SGX is currently not listed as supported +in any of Qemu's built-in CPU configuration. To expose SGX (and SGX Launch +Control) to a guest, you must either use ``-cpu host`` to pass-through the +host CPU model, or explicitly enable SGX when using a built-in CPU model, +e.g. via ``-cpu ,+sgx`` or ``-cpu ,+sgx,+sgxlc``. + +All SGX sub-features enumerated through CPUID, e.g. SGX2, MISCSELECT, +ATTRIBUTES, etc... can be restricted via CPUID flags. Be aware that enforcing +restriction of MISCSELECT, ATTRIBUTES and XFRM requires intercepting ECREATE, +i.e. may marginally reduce SGX performance in the guest. All SGX sub-features +controlled via -cpu are prefixed with "sgx", e.g.:: + + $ qemu-system-x86_64 -cpu help | xargs printf "%s\n" | grep sgx + sgx + sgx-debug + sgx-encls-c + sgx-enclv + sgx-exinfo + sgx-kss + sgx-mode64 + sgx-provisionkey + sgx-tokenkey + sgx1 + sgx2 + sgxlc + +The following Qemu snippet passes through the host CPU but restricts access to +the provision and EINIT token keys:: + + -cpu host,-sgx-provisionkey,-sgx-tokenkey + +SGX sub-features cannot be emulated, i.e. sub-features that are not present +in hardware cannot be forced on via '-cpu'. + +Virtualize SGX Launch Control +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Qemu SGX support for Launch Control (LC) is passive, in the sense that it +does not actively change the LC configuration. Qemu SGX provides the user +the ability to set/clear the CPUID flag (and by extension the associated +IA32_FEATURE_CONTROL MSR bit in fw_cfg) and saves/restores the LE Hash MSRs +when getting/putting guest state, but Qemu does not add new controls to +directly modify the LC configuration. Similar to hardware behavior, locking +the LC configuration to a non-Intel value is left to guest firmware. Unlike +host bios setting for SGX launch control(LC), there is no special bios setting +for SGX guest by our design. If host is in locked mode, we can still allow +creating VM with SGX. + +Feature Control +~~~~~~~~~~~~~~~ + +Qemu SGX updates the ``etc/msr_feature_control`` fw_cfg entry to set the SGX +(bit 18) and SGX LC (bit 17) flags based on their respective CPUID support, +i.e. existing guest firmware will automatically set SGX and SGX LC accordingly, +assuming said firmware supports fw_cfg.msr_feature_control. + +Launching a guest +----------------- + +To launch a SGX guest: + +.. parsed-literal:: + + |qemu_system_x86| \\ + -cpu host,+sgx-provisionkey \\ + -object memory-backend-epc,id=mem1,size=64M,prealloc=on \\ + -object memory-backend-epc,id=mem2,size=28M \\ + -M sgx-epc.0.memdev=mem1,sgx-epc.1.memdev=mem2 + +Utilizing SGX in the guest requires a kernel/OS with SGX support. +The support can be determined in guest by:: + + $ grep sgx /proc/cpuinfo + +and SGX epc info by:: + + $ dmesg | grep sgx + [ 1.242142] sgx: EPC section 0x180000000-0x181bfffff + [ 1.242319] sgx: EPC section 0x181c00000-0x1837fffff + +References +---------- + +- `SGX Homepage `__ + +- `SGX SDK `__ + +- SGX specification: Intel SDM Volume 3 diff --git a/docs/system/ppc/powernv.rst b/docs/system/ppc/powernv.rst index 4c4cdea527..86186b7d2c 100644 --- a/docs/system/ppc/powernv.rst +++ b/docs/system/ppc/powernv.rst @@ -53,8 +53,7 @@ initramfs ``skiroot``. 
Source code can be found on GitHub: https://github.com/open-power. -Prebuilt images of ``skiboot`` and ``skiboot`` are made available on the `OpenPOWER `__ site. To boot a POWER9 machine, use the `witherspoon `__ images. For POWER8, use -the `palmetto `__ images. +Prebuilt images of ``skiboot`` and ``skiroot`` are made available on the `OpenPOWER `__ site. QEMU includes a prebuilt image of ``skiboot`` which is updated when a more recent version is required by the models. diff --git a/docs/system/target-i386.rst b/docs/system/target-i386.rst index c9720a8cd1..4daa53c35d 100644 --- a/docs/system/target-i386.rst +++ b/docs/system/target-i386.rst @@ -26,6 +26,8 @@ Architectural features :maxdepth: 1 i386/cpu + i386/kvm-pv + i386/sgx .. _pcsys_005freq: diff --git a/docs/tools/qemu-img.rst b/docs/tools/qemu-img.rst index d58980aef8..c0a4443146 100644 --- a/docs/tools/qemu-img.rst +++ b/docs/tools/qemu-img.rst @@ -415,7 +415,7 @@ Command description: 4 Error on reading data -.. option:: convert [--object OBJECTDEF] [--image-opts] [--target-image-opts] [--target-is-zero] [--bitmaps [--skip-broken-bitmaps]] [-U] [-C] [-c] [-p] [-q] [-n] [-f FMT] [-t CACHE] [-T SRC_CACHE] [-O OUTPUT_FMT] [-B BACKING_FILE [-F backing_fmt]] [-o OPTIONS] [-l SNAPSHOT_PARAM] [-S SPARSE_SIZE] [-r RATE_LIMIT] [-m NUM_COROUTINES] [-W] FILENAME [FILENAME2 [...]] OUTPUT_FILENAME +.. option:: convert [--object OBJECTDEF] [--image-opts] [--target-image-opts] [--target-is-zero] [--bitmaps [--skip-broken-bitmaps]] [-U] [-C] [-c] [-p] [-q] [-n] [-f FMT] [-t CACHE] [-T SRC_CACHE] [-O OUTPUT_FMT] [-B BACKING_FILE [-F BACKING_FMT]] [-o OPTIONS] [-l SNAPSHOT_PARAM] [-S SPARSE_SIZE] [-r RATE_LIMIT] [-m NUM_COROUTINES] [-W] FILENAME [FILENAME2 [...]] OUTPUT_FILENAME Convert the disk image *FILENAME* or a snapshot *SNAPSHOT_PARAM* to disk image *OUTPUT_FILENAME* using format *OUTPUT_FMT*. It can diff --git a/docs/tools/qemu-nbd.rst b/docs/tools/qemu-nbd.rst index e39a9f4b1a..56e54cd441 100644 --- a/docs/tools/qemu-nbd.rst +++ b/docs/tools/qemu-nbd.rst @@ -99,8 +99,10 @@ driver options if ``--image-opts`` is specified. .. option:: --cache=CACHE - The cache mode to be used with the file. See the documentation of - the emulator's ``-drive cache=...`` option for allowed values. + The cache mode to be used with the file. Valid values are: + ``none``, ``writeback`` (the default), ``writethrough``, + ``directsync`` and ``unsafe``. See the documentation of + the emulator's ``-drive cache=...`` option for more info. .. option:: -n, --nocache diff --git a/docs/tools/virtiofsd.rst b/docs/tools/virtiofsd.rst index b208f2a6f0..cc31402830 100644 --- a/docs/tools/virtiofsd.rst +++ b/docs/tools/virtiofsd.rst @@ -183,6 +183,12 @@ Using ':' as the separator a rule is of the form: 'ok' as either an explicit terminator or for special handling of certain patterns. +- 'unsupported' - If a client tries to use a name matching 'key' it's + denied using ENOTSUP; when the server passes an attribute + name matching 'prepend' it's hidden. In many ways it's use is very like + 'ok' as either an explicit terminator or for special handling of certain + patterns. + **key** is a string tested as a prefix on an attribute name originating on the client. It maybe empty in which case a 'client' rule will always match on client names. 
diff --git a/dtc b/dtc index 85e5d83984..b6910bec11 160000 --- a/dtc +++ b/dtc @@ -1 +1 @@ -Subproject commit 85e5d839847af54efab170f2b1331b2a6421e647 +Subproject commit b6910bec11614980a21e46fbccc35934b671bd81 diff --git a/dump/dump.c b/dump/dump.c index ab625909f3..662d0a62cd 100644 --- a/dump/dump.c +++ b/dump/dump.c @@ -29,6 +29,7 @@ #include "qemu/error-report.h" #include "qemu/main-loop.h" #include "hw/misc/vmcoreinfo.h" +#include "migration/blocker.h" #ifdef TARGET_X86_64 #include "win_dump.h" @@ -47,6 +48,8 @@ #define MAX_GUEST_NOTE_SIZE (1 << 20) /* 1MB should be enough */ +static Error *dump_migration_blocker; + #define ELF_NOTE_SIZE(hdr_size, name_size, desc_size) \ ((DIV_ROUND_UP((hdr_size), 4) + \ DIV_ROUND_UP((name_size), 4) + \ @@ -101,6 +104,7 @@ static int dump_cleanup(DumpState *s) qemu_mutex_unlock_iothread(); } } + migrate_del_blocker(dump_migration_blocker); return 0; } @@ -2005,6 +2009,21 @@ void qmp_dump_guest_memory(bool paging, const char *file, return; } + if (!dump_migration_blocker) { + error_setg(&dump_migration_blocker, + "Live migration disabled: dump-guest-memory in progress"); + } + + /* + * Allows even for -only-migratable, but forbid migration during the + * process of dump guest memory. + */ + if (migrate_add_blocker_internal(dump_migration_blocker, errp)) { + /* Remember to release the fd before passing it over to dump state */ + close(fd); + return; + } + s = &dump_state_global; dump_state_prepare(s); diff --git a/fpu/softfloat-parts.c.inc b/fpu/softfloat-parts.c.inc index dddee92d6e..41d4b17e41 100644 --- a/fpu/softfloat-parts.c.inc +++ b/fpu/softfloat-parts.c.inc @@ -1219,14 +1219,35 @@ static FloatPartsN *partsN(minmax)(FloatPartsN *a, FloatPartsN *b, if (unlikely(ab_mask & float_cmask_anynan)) { /* - * For minnum/maxnum, if one operand is a QNaN, and the other + * For minNum/maxNum (IEEE 754-2008) + * or minimumNumber/maximumNumber (IEEE 754-2019), + * if one operand is a QNaN, and the other * operand is numerical, then return numerical argument. */ - if ((flags & minmax_isnum) + if ((flags & (minmax_isnum | minmax_isnumber)) && !(ab_mask & float_cmask_snan) && (ab_mask & ~float_cmask_qnan)) { return is_nan(a->cls) ? b : a; } + + /* + * In IEEE 754-2019, minNum, maxNum, minNumMag and maxNumMag + * are removed and replaced with minimum, minimumNumber, maximum + * and maximumNumber. + * minimumNumber/maximumNumber behavior for SNaN is changed to: + * If both operands are NaNs, a QNaN is returned. + * If either operand is a SNaN, + * an invalid operation exception is signaled, + * but unless both operands are NaNs, + * the SNaN is otherwise ignored and not converted to a QNaN. + */ + if ((flags & minmax_isnumber) + && (ab_mask & float_cmask_snan) + && (ab_mask & ~float_cmask_anynan)) { + float_raise(float_flag_invalid, s); + return is_nan(a->cls) ? b : a; + } + return parts_pick_nan(a, b, s); } diff --git a/fpu/softfloat.c b/fpu/softfloat.c index 6e769f990c..9a28720d82 100644 --- a/fpu/softfloat.c +++ b/fpu/softfloat.c @@ -436,6 +436,11 @@ enum { minmax_isnum = 2, /* Set for the IEEE 754-2008 minNumMag() and minNumMag() operations. */ minmax_ismag = 4, + /* + * Set for the IEEE 754-2019 minimumNumber() and maximumNumber() + * operations. 
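     * For illustration (restating the 754-2019 semantics described in
     * softfloat-parts.c.inc above): minimumNumber(SNaN, 2.0) signals the
     * invalid exception but still returns 2.0, minimumNumber(QNaN, 2.0)
     * returns 2.0 without signaling, and when both operands are NaNs a
     * QNaN is returned.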
+ */ + minmax_isnumber = 8, }; /* Simple helpers for checking if, or what kind of, NaN we have */ @@ -3927,12 +3932,14 @@ static float128 float128_minmax(float128 a, float128 b, { return type##_minmax(a, b, s, flags); } #define MINMAX_2(type) \ - MINMAX_1(type, max, 0) \ - MINMAX_1(type, maxnum, minmax_isnum) \ - MINMAX_1(type, maxnummag, minmax_isnum | minmax_ismag) \ - MINMAX_1(type, min, minmax_ismin) \ - MINMAX_1(type, minnum, minmax_ismin | minmax_isnum) \ - MINMAX_1(type, minnummag, minmax_ismin | minmax_isnum | minmax_ismag) + MINMAX_1(type, max, 0) \ + MINMAX_1(type, maxnum, minmax_isnum) \ + MINMAX_1(type, maxnummag, minmax_isnum | minmax_ismag) \ + MINMAX_1(type, maximum_number, minmax_isnumber) \ + MINMAX_1(type, min, minmax_ismin) \ + MINMAX_1(type, minnum, minmax_ismin | minmax_isnum) \ + MINMAX_1(type, minnummag, minmax_ismin | minmax_isnum | minmax_ismag) \ + MINMAX_1(type, minimum_number, minmax_ismin | minmax_isnumber) \ MINMAX_2(float16) MINMAX_2(bfloat16) diff --git a/fsdev/9p-marshal.c b/fsdev/9p-marshal.c index a01bba6908..51881fe220 100644 --- a/fsdev/9p-marshal.c +++ b/fsdev/9p-marshal.c @@ -18,6 +18,8 @@ #include "9p-marshal.h" +P9ARRAY_DEFINE_TYPE(V9fsString, v9fs_string_free); + void v9fs_string_free(V9fsString *str) { g_free(str->data); diff --git a/fsdev/9p-marshal.h b/fsdev/9p-marshal.h index ceaf2f521e..f1abbe151c 100644 --- a/fsdev/9p-marshal.h +++ b/fsdev/9p-marshal.h @@ -1,10 +1,13 @@ #ifndef QEMU_9P_MARSHAL_H #define QEMU_9P_MARSHAL_H +#include "p9array.h" + typedef struct V9fsString { uint16_t size; char *data; } V9fsString; +P9ARRAY_DECLARE_TYPE(V9fsString); typedef struct V9fsQID { uint8_t type; diff --git a/fsdev/file-op-9p.h b/fsdev/file-op-9p.h index 42f677cf38..8fd89f0447 100644 --- a/fsdev/file-op-9p.h +++ b/fsdev/file-op-9p.h @@ -18,6 +18,7 @@ #include #include #include "qemu-fsdev-throttle.h" +#include "p9array.h" #define SM_LOCAL_MODE_BITS 0600 #define SM_LOCAL_DIR_MODE_BITS 0700 @@ -105,6 +106,7 @@ struct V9fsPath { uint16_t size; char *data; }; +P9ARRAY_DECLARE_TYPE(V9fsPath); typedef union V9fsFidOpenState V9fsFidOpenState; diff --git a/fsdev/p9array.h b/fsdev/p9array.h new file mode 100644 index 0000000000..6aa25327ca --- /dev/null +++ b/fsdev/p9array.h @@ -0,0 +1,160 @@ +/* + * P9Array - deep auto free C-array + * + * Copyright (c) 2021 Crudebyte + * + * Authors: + * Christian Schoenebeck + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ +#ifndef QEMU_P9ARRAY_H +#define QEMU_P9ARRAY_H + +#include "qemu/compiler.h" + +/** + * P9Array provides a mechanism to access arrays in common C-style (e.g. by + * square bracket [] operator) in conjunction with reference variables that + * perform deep auto free of the array when leaving the scope of the auto + * reference variable. That means not only is the array itself automatically + * freed, but also memory dynamically allocated by the individual array + * elements. + * + * Example: + * + * Consider the following user struct @c Foo which shall be used as scalar + * (element) type of an array: + * @code + * typedef struct Foo { + * int i; + * char *s; + * } Foo; + * @endcode + * and assume it has the following function to free memory allocated by @c Foo + * instances: + * @code + * void free_foo(Foo *foo) { + * free(foo->s); + * } + * @endcode + * Add the following to a shared header file: + * @code + * P9ARRAY_DECLARE_TYPE(Foo); + * @endcode + * and the following to a C unit file: + * @code + * P9ARRAY_DEFINE_TYPE(Foo, free_foo); + * @endcode + * Finally the array may then be used like this: + * @code + * void doSomething(size_t n) { + * P9ARRAY_REF(Foo) foos = NULL; + * P9ARRAY_NEW(Foo, foos, n); + * for (size_t i = 0; i < n; ++i) { + * foos[i].i = i; + * foos[i].s = calloc(4096, 1); + * snprintf(foos[i].s, 4096, "foo %d", i); + * if (...) { + * return; // array auto freed here + * } + * } + * // array auto freed here + * } + * @endcode + */ + +/** + * Declares an array type for the passed @a scalar_type. + * + * This is typically used from a shared header file. + * + * @param scalar_type - type of the individual array elements + */ +#define P9ARRAY_DECLARE_TYPE(scalar_type) \ + typedef struct P9Array##scalar_type { \ + size_t len; \ + scalar_type first[]; \ + } P9Array##scalar_type; \ + \ + void p9array_new_##scalar_type(scalar_type **auto_var, size_t len); \ + void p9array_auto_free_##scalar_type(scalar_type **auto_var); \ + +/** + * Defines an array type for the passed @a scalar_type and appropriate + * @a scalar_cleanup_func. + * + * This is typically used from a C unit file. + * + * @param scalar_type - type of the individual array elements + * @param scalar_cleanup_func - appropriate function to free memory dynamically + * allocated by individual array elements before + */ +#define P9ARRAY_DEFINE_TYPE(scalar_type, scalar_cleanup_func) \ + void p9array_new_##scalar_type(scalar_type **auto_var, size_t len) \ + { \ + p9array_auto_free_##scalar_type(auto_var); \ + P9Array##scalar_type *arr = g_malloc0(sizeof(P9Array##scalar_type) + \ + len * sizeof(scalar_type)); \ + arr->len = len; \ + *auto_var = &arr->first[0]; \ + } \ + \ + void p9array_auto_free_##scalar_type(scalar_type **auto_var) \ + { \ + scalar_type *first = (*auto_var); \ + if (!first) { \ + return; \ + } \ + P9Array##scalar_type *arr = (P9Array##scalar_type *) ( \ + ((char *)first) - offsetof(P9Array##scalar_type, first) \ + ); \ + for (size_t i = 0; i < arr->len; ++i) { \ + scalar_cleanup_func(&arr->first[i]); \ + } \ + g_free(arr); \ + } \ + +/** + * Used to declare a reference variable (unique pointer) for an array. After + * leaving the scope of the reference variable, the associated array is + * automatically freed. 
+ * + * @param scalar_type - type of the individual array elements + */ +#define P9ARRAY_REF(scalar_type) \ + __attribute((__cleanup__(p9array_auto_free_##scalar_type))) scalar_type* + +/** + * Allocates a new array of passed @a scalar_type with @a len number of array + * elements and assigns the created array to the reference variable + * @a auto_var. + * + * @param scalar_type - type of the individual array elements + * @param auto_var - destination reference variable + * @param len - amount of array elements to be allocated immediately + */ +#define P9ARRAY_NEW(scalar_type, auto_var, len) \ + QEMU_BUILD_BUG_MSG( \ + !__builtin_types_compatible_p(scalar_type, typeof(*auto_var)), \ + "P9Array scalar type mismatch" \ + ); \ + p9array_new_##scalar_type((&auto_var), len) + +#endif /* QEMU_P9ARRAY_H */ diff --git a/gdb-xml/arm-m-profile-mve.xml b/gdb-xml/arm-m-profile-mve.xml new file mode 100644 index 0000000000..cba664c4c5 --- /dev/null +++ b/gdb-xml/arm-m-profile-mve.xml @@ -0,0 +1,19 @@ + + + + + + + + + + + + + + + diff --git a/gdb-xml/arm-neon.xml b/gdb-xml/arm-neon.xml index ce3ee03ec4..9dce0a996f 100644 --- a/gdb-xml/arm-neon.xml +++ b/gdb-xml/arm-neon.xml @@ -82,7 +82,5 @@ - - diff --git a/gdb-xml/arm-vfp-sysregs.xml b/gdb-xml/arm-vfp-sysregs.xml new file mode 100644 index 0000000000..c4aa2721c8 --- /dev/null +++ b/gdb-xml/arm-vfp-sysregs.xml @@ -0,0 +1,17 @@ + + + + + + + diff --git a/gdb-xml/arm-vfp.xml b/gdb-xml/arm-vfp.xml index b20881e9a9..ebed5b3d57 100644 --- a/gdb-xml/arm-vfp.xml +++ b/gdb-xml/arm-vfp.xml @@ -23,7 +23,5 @@ - - diff --git a/gdb-xml/arm-vfp3.xml b/gdb-xml/arm-vfp3.xml index 227afd8017..ef391c7144 100644 --- a/gdb-xml/arm-vfp3.xml +++ b/gdb-xml/arm-vfp3.xml @@ -39,7 +39,5 @@ - - diff --git a/gdbstub.c b/gdbstub.c index 36b85aa50e..23baaef40e 100644 --- a/gdbstub.c +++ b/gdbstub.c @@ -3138,8 +3138,12 @@ gdb_handlesig(CPUState *cpu, int sig) tb_flush(cpu); if (sig != 0) { - snprintf(buf, sizeof(buf), "S%02x", target_signal_to_gdb(sig)); - put_packet(buf); + gdb_set_stop_cpu(cpu); + g_string_printf(gdbserver_state.str_buf, + "T%02xthread:", target_signal_to_gdb(sig)); + gdb_append_thread_id(cpu, gdbserver_state.str_buf); + g_string_append_c(gdbserver_state.str_buf, ';'); + put_strbuf(); } /* put_packet() might have detected that the peer terminated the connection. 
*/ diff --git a/hmp-commands-info.hx b/hmp-commands-info.hx index 27206ac049..407a1da800 100644 --- a/hmp-commands-info.hx +++ b/hmp-commands-info.hx @@ -127,21 +127,6 @@ SRST Show local APIC state ERST -#if defined(TARGET_I386) - { - .name = "ioapic", - .args_type = "", - .params = "", - .help = "show io apic state", - .cmd = hmp_info_io_apic, - }, -#endif - -SRST - ``info ioapic`` - Show io APIC state -ERST - { .name = "cpus", .args_type = "", @@ -174,7 +159,7 @@ ERST .args_type = "", .params = "", .help = "show the interrupts statistics (if available)", - .cmd = hmp_info_irq, + .cmd_info_hrt = qmp_x_query_irq, }, SRST @@ -200,7 +185,7 @@ ERST .args_type = "", .params = "", .help = "show RDMA state", - .cmd = hmp_info_rdma, + .cmd_info_hrt = qmp_x_query_rdma, }, SRST @@ -340,7 +325,7 @@ ERST .args_type = "", .params = "", .help = "show NUMA information", - .cmd = hmp_info_numa, + .cmd_info_hrt = qmp_x_query_numa, }, SRST @@ -353,7 +338,7 @@ ERST .args_type = "", .params = "", .help = "show guest USB devices", - .cmd = hmp_info_usb, + .cmd_info_hrt = qmp_x_query_usb, }, SRST @@ -378,7 +363,7 @@ ERST .args_type = "", .params = "", .help = "show profiling information", - .cmd = hmp_info_profile, + .cmd_info_hrt = qmp_x_query_profile, }, SRST @@ -609,7 +594,7 @@ ERST .args_type = "", .params = "", .help = "show roms", - .cmd = hmp_info_roms, + .cmd_info_hrt = qmp_x_query_roms, }, SRST @@ -787,7 +772,7 @@ ERST .args_type = "", .params = "", .help = "Display system ramblock information", - .cmd = hmp_info_ramblock, + .cmd_info_hrt = qmp_x_query_ramblock, }, SRST @@ -877,3 +862,18 @@ SRST ``info dirty_rate`` Display the vcpu dirty rate information. ERST + +#if defined(TARGET_I386) + { + .name = "sgx", + .args_type = "", + .params = "", + .help = "show intel SGX information", + .cmd = hmp_info_sgx, + }, +#endif + +SRST + ``info sgx`` + Show intel SGX information. +ERST diff --git a/hmp-commands.hx b/hmp-commands.hx index cf723c69ac..70a9136ac2 100644 --- a/hmp-commands.hx +++ b/hmp-commands.hx @@ -382,7 +382,7 @@ SRST ERST { - .name = "stop", + .name = "stop|s", .args_type = "", .params = "", .help = "stop emulation", @@ -390,7 +390,7 @@ ERST }, SRST -``stop`` +``stop`` or ``s`` Stop emulation. ERST @@ -1737,8 +1737,10 @@ ERST { .name = "calc_dirty_rate", - .args_type = "second:l,sample_pages_per_GB:l?", - .params = "second [sample_pages_per_GB]", - .help = "start a round of guest dirty rate measurement", + .args_type = "dirty_ring:-r,dirty_bitmap:-b,second:l,sample_pages_per_GB:l?", + .params = "[-r] [-b] second [sample_pages_per_GB]", + .help = "start a round of guest dirty rate measurement (using -r to" + "\n\t\t\t specify dirty ring as the method of calculation and" + "\n\t\t\t -b to specify dirty bitmap as method of calculation)", .cmd = hmp_calc_dirty_rate, }, diff --git a/hw/9pfs/9p.c b/hw/9pfs/9p.c index c857b31321..15b3f4d385 100644 --- a/hw/9pfs/9p.c +++ b/hw/9pfs/9p.c @@ -50,6 +50,8 @@ enum { Oappend = 0x80, }; +P9ARRAY_DEFINE_TYPE(V9fsPath, v9fs_path_free); + static ssize_t pdu_marshal(V9fsPDU *pdu, size_t offset, const char *fmt, ...) { ssize_t ret; @@ -1262,6 +1264,37 @@ static int coroutine_fn stat_to_v9stat(V9fsPDU *pdu, V9fsPath *path, #define P9_STATS_ALL 0x00003fffULL /* Mask for All fields above */ +/** + * Convert host filesystem's block size into an appropriate block size for + * 9p client (guest OS side). The value returned suggests an "optimum" block + * size for 9p I/O, i.e. to maximize performance. 
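 * For example (illustrative numbers only): with a client msize of 8192 and
 * a host block size of 4096, the result is
 * QEMU_ALIGN_DOWN(8192 - P9_IOHDRSZ, 4096) == 4096; with a reported block
 * size of 0, the fallback is msize - P9_IOHDRSZ.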
+ * + * @pdu: 9p client request + * @blksize: host filesystem's block size + */ +static int32_t blksize_to_iounit(const V9fsPDU *pdu, int32_t blksize) +{ + int32_t iounit = 0; + V9fsState *s = pdu->s; + + /* + * iounit should be multiples of blksize (host filesystem block size) + * as well as less than (client msize - P9_IOHDRSZ) + */ + if (blksize) { + iounit = QEMU_ALIGN_DOWN(s->msize - P9_IOHDRSZ, blksize); + } + if (!iounit) { + iounit = s->msize - P9_IOHDRSZ; + } + return iounit; +} + +static int32_t stat_to_iounit(const V9fsPDU *pdu, const struct stat *stbuf) +{ + return blksize_to_iounit(pdu, stbuf->st_blksize); +} + static int stat_to_v9stat_dotl(V9fsPDU *pdu, const struct stat *stbuf, V9fsStatDotl *v9lstat) { @@ -1273,7 +1306,7 @@ static int stat_to_v9stat_dotl(V9fsPDU *pdu, const struct stat *stbuf, v9lstat->st_gid = stbuf->st_gid; v9lstat->st_rdev = stbuf->st_rdev; v9lstat->st_size = stbuf->st_size; - v9lstat->st_blksize = stbuf->st_blksize; + v9lstat->st_blksize = stat_to_iounit(pdu, stbuf); v9lstat->st_blocks = stbuf->st_blocks; v9lstat->st_atime_sec = stbuf->st_atime; v9lstat->st_atime_nsec = stbuf->st_atim.tv_nsec; @@ -1705,13 +1738,14 @@ static void coroutine_fn v9fs_walk(void *opaque) int name_idx; g_autofree V9fsQID *qids = NULL; int i, err = 0; - V9fsPath dpath, path, *pathes = NULL; + V9fsPath dpath, path; + P9ARRAY_REF(V9fsPath) pathes = NULL; uint16_t nwnames; struct stat stbuf, fidst; g_autofree struct stat *stbufs = NULL; size_t offset = 7; int32_t fid, newfid; - V9fsString *wnames = NULL; + P9ARRAY_REF(V9fsString) wnames = NULL; V9fsFidState *fidp; V9fsFidState *newfidp = NULL; V9fsPDU *pdu = opaque; @@ -1732,10 +1766,10 @@ static void coroutine_fn v9fs_walk(void *opaque) goto out_nofid; } if (nwnames) { - wnames = g_new0(V9fsString, nwnames); + P9ARRAY_NEW(V9fsString, wnames, nwnames); qids = g_new0(V9fsQID, nwnames); stbufs = g_new0(struct stat, nwnames); - pathes = g_new0(V9fsPath, nwnames); + P9ARRAY_NEW(V9fsPath, pathes, nwnames); for (i = 0; i < nwnames; i++) { err = pdu_unmarshal(pdu, offset, "s", &wnames[i]); if (err < 0) { @@ -1867,36 +1901,14 @@ out: v9fs_path_free(&path); out_nofid: pdu_complete(pdu, err); - if (nwnames && nwnames <= P9_MAXWELEM) { - for (name_idx = 0; name_idx < nwnames; name_idx++) { - v9fs_string_free(&wnames[name_idx]); - v9fs_path_free(&pathes[name_idx]); - } - g_free(wnames); - g_free(pathes); - } } static int32_t coroutine_fn get_iounit(V9fsPDU *pdu, V9fsPath *path) { struct statfs stbuf; - int32_t iounit = 0; - V9fsState *s = pdu->s; + int err = v9fs_co_statfs(pdu, path, &stbuf); - /* - * iounit should be multiples of f_bsize (host filesystem block size - * and as well as less than (client msize - P9_IOHDRSZ)) - */ - if (!v9fs_co_statfs(pdu, path, &stbuf)) { - if (stbuf.f_bsize) { - iounit = stbuf.f_bsize; - iounit *= (s->msize - P9_IOHDRSZ) / stbuf.f_bsize; - } - } - if (!iounit) { - iounit = s->msize - P9_IOHDRSZ; - } - return iounit; + return blksize_to_iounit(pdu, (err >= 0) ? 
stbuf.f_bsize : 0); } static void coroutine_fn v9fs_open(void *opaque) diff --git a/hw/Kconfig b/hw/Kconfig index 8cb7664d70..ad20cce0a9 100644 --- a/hw/Kconfig +++ b/hw/Kconfig @@ -81,3 +81,5 @@ config XLNX_ZYNQMP select REGISTER select CAN_BUS select PTIMER + select XLNX_BBRAM + select XLNX_EFUSE_ZYNQMP diff --git a/hw/acpi/Kconfig b/hw/acpi/Kconfig index 3b5e118c54..622b0b50b7 100644 --- a/hw/acpi/Kconfig +++ b/hw/acpi/Kconfig @@ -51,6 +51,10 @@ config ACPI_VMGENID default y depends on PC +config ACPI_VIOT + bool + depends on ACPI + config ACPI_HW_REDUCED bool select ACPI diff --git a/hw/acpi/acpi-x86-stub.c b/hw/acpi/acpi-x86-stub.c index e9e46c5c5f..3df1e090f4 100644 --- a/hw/acpi/acpi-x86-stub.c +++ b/hw/acpi/acpi-x86-stub.c @@ -3,7 +3,8 @@ #include "hw/i386/acpi-build.h" void pc_madt_cpu_entry(AcpiDeviceIf *adev, int uid, - const CPUArchIdList *apic_ids, GArray *entry) + const CPUArchIdList *apic_ids, GArray *entry, + bool force_enabled) { } diff --git a/hw/acpi/aml-build.c b/hw/acpi/aml-build.c index d5103e6d7b..b3b3310df3 100644 --- a/hw/acpi/aml-build.c +++ b/hw/acpi/aml-build.c @@ -52,6 +52,19 @@ static void build_append_byte(GArray *array, uint8_t val) g_array_append_val(array, val); } +static void build_append_padded_str(GArray *array, const char *str, + size_t maxlen, char pad) +{ + size_t i; + size_t len = strlen(str); + + g_assert(len <= maxlen); + g_array_append_vals(array, str, len); + for (i = maxlen - len; i > 0; i--) { + g_array_append_val(array, pad); + } +} + static void build_append_array(GArray *array, GArray *val) { g_array_append_vals(array, val->data, val->len); @@ -1692,27 +1705,53 @@ Aml *aml_object_type(Aml *object) return var; } -void -build_header(BIOSLinker *linker, GArray *table_data, - AcpiTableHeader *h, const char *sig, int len, uint8_t rev, - const char *oem_id, const char *oem_table_id) +void acpi_table_begin(AcpiTable *desc, GArray *array) { - unsigned tbl_offset = (char *)h - table_data->data; - unsigned checksum_offset = (char *)&h->checksum - table_data->data; - memcpy(&h->signature, sig, 4); - h->length = cpu_to_le32(len); - h->revision = rev; - strpadcpy((char *)h->oem_id, sizeof h->oem_id, oem_id, ' '); - strpadcpy((char *)h->oem_table_id, sizeof h->oem_table_id, - oem_table_id, ' '); + desc->array = array; + desc->table_offset = array->len; + + /* + * ACPI spec 1.0b + * 5.2.3 System Description Table Header + */ + g_assert(strlen(desc->sig) == 4); + g_array_append_vals(array, desc->sig, 4); /* Signature */ + /* + * reserve space for Length field, which will be patched by + * acpi_table_end() when the table creation is finished. 
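     * (Typical usage, as in build_rsdt() below: call acpi_table_begin(&table,
     *  table_data), append the table body, then acpi_table_end(linker, &table)
     *  to patch the length and register the checksum.)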
+ */ + build_append_int_noprefix(array, 0, 4); /* Length */ + build_append_int_noprefix(array, desc->rev, 1); /* Revision */ + build_append_int_noprefix(array, 0, 1); /* Checksum */ + build_append_padded_str(array, desc->oem_id, 6, ' '); /* OEMID */ + /* OEM Table ID */ + build_append_padded_str(array, desc->oem_table_id, 8, ' '); + build_append_int_noprefix(array, 1, 4); /* OEM Revision */ + g_array_append_vals(array, ACPI_BUILD_APPNAME8, 4); /* Creator ID */ + build_append_int_noprefix(array, 1, 4); /* Creator Revision */ +} + +void acpi_table_end(BIOSLinker *linker, AcpiTable *desc) +{ + /* + * ACPI spec 1.0b + * 5.2.3 System Description Table Header + * Table 5-2 DESCRIPTION_HEADER Fields + */ + const unsigned checksum_offset = 9; + uint32_t table_len = desc->array->len - desc->table_offset; + uint32_t table_len_le = cpu_to_le32(table_len); + gchar *len_ptr = &desc->array->data[desc->table_offset + 4]; + + /* patch "Length" field that has been reserved by acpi_table_begin() + * to the actual length, i.e. accumulated table length from + * acpi_table_begin() till acpi_table_end() + */ + memcpy(len_ptr, &table_len_le, sizeof table_len_le); - h->oem_revision = cpu_to_le32(1); - memcpy(h->asl_compiler_id, ACPI_BUILD_APPNAME8, 4); - h->asl_compiler_revision = cpu_to_le32(1); - /* Checksum to be filled in by Guest linker */ bios_linker_loader_add_checksum(linker, ACPI_BUILD_TABLE_FILE, - tbl_offset, len, checksum_offset); + desc->table_offset, table_len, desc->table_offset + checksum_offset); } void *acpi_data_push(GArray *table_data, unsigned size) @@ -1822,73 +1861,81 @@ build_rsdp(GArray *tbl, BIOSLinker *linker, AcpiRsdpData *rsdp_data) 32); } -/* Build rsdt table */ +/* + * ACPI 1.0 Root System Description Table (RSDT) + */ void build_rsdt(GArray *table_data, BIOSLinker *linker, GArray *table_offsets, const char *oem_id, const char *oem_table_id) { int i; - unsigned rsdt_entries_offset; - AcpiRsdtDescriptorRev1 *rsdt; - int rsdt_start = table_data->len; - const unsigned table_data_len = (sizeof(uint32_t) * table_offsets->len); - const unsigned rsdt_entry_size = sizeof(rsdt->table_offset_entry[0]); - const size_t rsdt_len = sizeof(*rsdt) + table_data_len; + AcpiTable table = { .sig = "RSDT", .rev = 1, + .oem_id = oem_id, .oem_table_id = oem_table_id }; - rsdt = acpi_data_push(table_data, rsdt_len); - rsdt_entries_offset = (char *)rsdt->table_offset_entry - table_data->data; + acpi_table_begin(&table, table_data); for (i = 0; i < table_offsets->len; ++i) { uint32_t ref_tbl_offset = g_array_index(table_offsets, uint32_t, i); - uint32_t rsdt_entry_offset = rsdt_entries_offset + rsdt_entry_size * i; + uint32_t rsdt_entry_offset = table.array->len; - /* rsdt->table_offset_entry to be filled by Guest linker */ + /* reserve space for entry */ + build_append_int_noprefix(table.array, 0, 4); + + /* mark position of RSDT entry to be filled by Guest linker */ bios_linker_loader_add_pointer(linker, - ACPI_BUILD_TABLE_FILE, rsdt_entry_offset, rsdt_entry_size, + ACPI_BUILD_TABLE_FILE, rsdt_entry_offset, 4, ACPI_BUILD_TABLE_FILE, ref_tbl_offset); + } - build_header(linker, table_data, - (void *)(table_data->data + rsdt_start), - "RSDT", rsdt_len, 1, oem_id, oem_table_id); + acpi_table_end(linker, &table); } -/* Build xsdt table */ +/* + * ACPI 2.0 eXtended System Description Table (XSDT) + */ void build_xsdt(GArray *table_data, BIOSLinker *linker, GArray *table_offsets, const char *oem_id, const char *oem_table_id) { int i; - unsigned xsdt_entries_offset; - AcpiXsdtDescriptorRev2 *xsdt; - int xsdt_start 
= table_data->len; - const unsigned table_data_len = (sizeof(uint64_t) * table_offsets->len); - const unsigned xsdt_entry_size = sizeof(xsdt->table_offset_entry[0]); - const size_t xsdt_len = sizeof(*xsdt) + table_data_len; + AcpiTable table = { .sig = "XSDT", .rev = 1, + .oem_id = oem_id, .oem_table_id = oem_table_id }; + + acpi_table_begin(&table, table_data); - xsdt = acpi_data_push(table_data, xsdt_len); - xsdt_entries_offset = (char *)xsdt->table_offset_entry - table_data->data; for (i = 0; i < table_offsets->len; ++i) { uint64_t ref_tbl_offset = g_array_index(table_offsets, uint32_t, i); - uint64_t xsdt_entry_offset = xsdt_entries_offset + xsdt_entry_size * i; + uint64_t xsdt_entry_offset = table.array->len; - /* xsdt->table_offset_entry to be filled by Guest linker */ + /* reserve space for entry */ + build_append_int_noprefix(table.array, 0, 8); + + /* mark position of XSDT entry to be filled by Guest linker */ bios_linker_loader_add_pointer(linker, - ACPI_BUILD_TABLE_FILE, xsdt_entry_offset, xsdt_entry_size, + ACPI_BUILD_TABLE_FILE, xsdt_entry_offset, 8, ACPI_BUILD_TABLE_FILE, ref_tbl_offset); } - build_header(linker, table_data, - (void *)(table_data->data + xsdt_start), - "XSDT", xsdt_len, 1, oem_id, oem_table_id); + acpi_table_end(linker, &table); } -void build_srat_memory(AcpiSratMemoryAffinity *numamem, uint64_t base, +/* + * ACPI spec, Revision 4.0 + * 5.2.16.2 Memory Affinity Structure + */ +void build_srat_memory(GArray *table_data, uint64_t base, uint64_t len, int node, MemoryAffinityFlags flags) { - numamem->type = ACPI_SRAT_MEMORY; - numamem->length = sizeof(*numamem); - numamem->proximity = cpu_to_le32(node); - numamem->flags = cpu_to_le32(flags); - numamem->base_addr = cpu_to_le64(base); - numamem->range_length = cpu_to_le64(len); + build_append_int_noprefix(table_data, 1, 1); /* Type */ + build_append_int_noprefix(table_data, 40, 1); /* Length */ + build_append_int_noprefix(table_data, node, 4); /* Proximity Domain */ + build_append_int_noprefix(table_data, 0, 2); /* Reserved */ + build_append_int_noprefix(table_data, base, 4); /* Base Address Low */ + /* Base Address High */ + build_append_int_noprefix(table_data, base >> 32, 4); + build_append_int_noprefix(table_data, len, 4); /* Length Low */ + build_append_int_noprefix(table_data, len >> 32, 4); /* Length High */ + build_append_int_noprefix(table_data, 0, 4); /* Reserved */ + build_append_int_noprefix(table_data, flags, 4); /* Flags */ + build_append_int_noprefix(table_data, 0, 8); /* Reserved */ } /* @@ -1898,11 +1945,12 @@ void build_srat_memory(AcpiSratMemoryAffinity *numamem, uint64_t base, void build_slit(GArray *table_data, BIOSLinker *linker, MachineState *ms, const char *oem_id, const char *oem_table_id) { - int slit_start, i, j; - slit_start = table_data->len; + int i, j; int nb_numa_nodes = ms->numa_state->num_nodes; + AcpiTable table = { .sig = "SLIT", .rev = 1, + .oem_id = oem_id, .oem_table_id = oem_table_id }; - acpi_data_push(table_data, sizeof(AcpiTableHeader)); + acpi_table_begin(&table, table_data); build_append_int_noprefix(table_data, nb_numa_nodes, 8); for (i = 0; i < nb_numa_nodes; i++) { @@ -1913,11 +1961,96 @@ void build_slit(GArray *table_data, BIOSLinker *linker, MachineState *ms, 1); } } + acpi_table_end(linker, &table); +} - build_header(linker, table_data, - (void *)(table_data->data + slit_start), - "SLIT", - table_data->len - slit_start, 1, oem_id, oem_table_id); +/* + * ACPI spec, Revision 6.3 + * 5.2.29.1 Processor hierarchy node structure (Type 0) + */ +static void
build_processor_hierarchy_node(GArray *tbl, uint32_t flags, + uint32_t parent, uint32_t id, + uint32_t *priv_rsrc, + uint32_t priv_num) +{ + int i; + + build_append_byte(tbl, 0); /* Type 0 - processor */ + build_append_byte(tbl, 20 + priv_num * 4); /* Length */ + build_append_int_noprefix(tbl, 0, 2); /* Reserved */ + build_append_int_noprefix(tbl, flags, 4); /* Flags */ + build_append_int_noprefix(tbl, parent, 4); /* Parent */ + build_append_int_noprefix(tbl, id, 4); /* ACPI Processor ID */ + + /* Number of private resources */ + build_append_int_noprefix(tbl, priv_num, 4); + + /* Private resources[N] */ + if (priv_num > 0) { + assert(priv_rsrc); + for (i = 0; i < priv_num; i++) { + build_append_int_noprefix(tbl, priv_rsrc[i], 4); + } + } +} + +/* + * ACPI spec, Revision 6.3 + * 5.2.29 Processor Properties Topology Table (PPTT) + */ +void build_pptt(GArray *table_data, BIOSLinker *linker, MachineState *ms, + const char *oem_id, const char *oem_table_id) +{ + int pptt_start = table_data->len; + int uid = 0; + int socket; + AcpiTable table = { .sig = "PPTT", .rev = 2, + .oem_id = oem_id, .oem_table_id = oem_table_id }; + + acpi_table_begin(&table, table_data); + + for (socket = 0; socket < ms->smp.sockets; socket++) { + uint32_t socket_offset = table_data->len - pptt_start; + int core; + + build_processor_hierarchy_node( + table_data, + /* + * Physical package - represents the boundary + * of a physical package + */ + (1 << 0), + 0, socket, NULL, 0); + + for (core = 0; core < ms->smp.cores; core++) { + uint32_t core_offset = table_data->len - pptt_start; + int thread; + + if (ms->smp.threads > 1) { + build_processor_hierarchy_node( + table_data, + (0 << 0), /* not a physical package */ + socket_offset, core, NULL, 0); + + for (thread = 0; thread < ms->smp.threads; thread++) { + build_processor_hierarchy_node( + table_data, + (1 << 1) | /* ACPI Processor ID valid */ + (1 << 2) | /* Processor is a Thread */ + (1 << 3), /* Node is a Leaf */ + core_offset, uid++, NULL, 0); + } + } else { + build_processor_hierarchy_node( + table_data, + (1 << 1) | /* ACPI Processor ID valid */ + (1 << 3), /* Node is a Leaf */ + socket_offset, uid++, NULL, 0); + } + } + } + + acpi_table_end(linker, &table); } /* build rev1/rev3/rev5.1 FADT */ @@ -1925,9 +2058,10 @@ void build_fadt(GArray *tbl, BIOSLinker *linker, const AcpiFadtData *f, const char *oem_id, const char *oem_table_id) { int off; - int fadt_start = tbl->len; + AcpiTable table = { .sig = "FACP", .rev = f->rev, + .oem_id = oem_id, .oem_table_id = oem_table_id }; - acpi_data_push(tbl, sizeof(AcpiTableHeader)); + acpi_table_begin(&table, tbl); /* FACS address to be filled by Guest linker at runtime */ off = tbl->len; @@ -1991,7 +2125,7 @@ void build_fadt(GArray *tbl, BIOSLinker *linker, const AcpiFadtData *f, build_append_int_noprefix(tbl, f->flags, 4); /* Flags */ if (f->rev == 1) { - goto build_hdr; + goto done; } build_append_gas_from_struct(tbl, &f->reset_reg); /* RESET_REG */ @@ -2028,7 +2162,7 @@ void build_fadt(GArray *tbl, BIOSLinker *linker, const AcpiFadtData *f, build_append_gas(tbl, AML_AS_SYSTEM_MEMORY, 0 , 0, 0, 0); /* X_GPE1_BLK */ if (f->rev <= 4) { - goto build_hdr; + goto done; } /* SLEEP_CONTROL_REG */ @@ -2039,9 +2173,8 @@ void build_fadt(GArray *tbl, BIOSLinker *linker, const AcpiFadtData *f, /* TODO: extra fields need to be added to support revisions above rev5 */ assert(f->rev == 5); -build_hdr: - build_header(linker, tbl, (void *)(tbl->data + fadt_start), - "FACP", tbl->len - fadt_start, f->rev, oem_id, oem_table_id); +done: + 
acpi_table_end(linker, &table); } #ifdef CONFIG_TPM @@ -2054,13 +2187,14 @@ void build_tpm2(GArray *table_data, BIOSLinker *linker, GArray *tcpalog, const char *oem_id, const char *oem_table_id) { uint8_t start_method_params[12] = {}; - unsigned log_addr_offset, tpm2_start; + unsigned log_addr_offset; uint64_t control_area_start_address; TPMIf *tpmif = tpm_find(); uint32_t start_method; + AcpiTable table = { .sig = "TPM2", .rev = 4, + .oem_id = oem_id, .oem_table_id = oem_table_id }; - tpm2_start = table_data->len; - acpi_data_push(table_data, sizeof(AcpiTableHeader)); + acpi_table_begin(&table, table_data); /* Platform Class */ build_append_int_noprefix(table_data, TPM2_ACPI_CLASS_CLIENT, 2); @@ -2098,9 +2232,7 @@ void build_tpm2(GArray *table_data, BIOSLinker *linker, GArray *tcpalog, bios_linker_loader_add_pointer(linker, ACPI_BUILD_TABLE_FILE, log_addr_offset, 8, ACPI_BUILD_TPMLOG_FILE, 0); - build_header(linker, table_data, - (void *)(table_data->data + tpm2_start), - "TPM2", table_data->len - tpm2_start, 4, oem_id, oem_table_id); + acpi_table_end(linker, &table); } #endif diff --git a/hw/acpi/cpu.c b/hw/acpi/cpu.c index f82e9512fd..b20903ea30 100644 --- a/hw/acpi/cpu.c +++ b/hw/acpi/cpu.c @@ -669,21 +669,8 @@ void build_cpus_aml(Aml *table, MachineState *machine, CPUHotplugFeatures opts, /* build _MAT object */ assert(adevc && adevc->madt_cpu); - adevc->madt_cpu(adev, i, arch_ids, madt_buf); - switch (madt_buf->data[0]) { - case ACPI_APIC_PROCESSOR: { - AcpiMadtProcessorApic *apic = (void *)madt_buf->data; - apic->flags = cpu_to_le32(1); - break; - } - case ACPI_APIC_LOCAL_X2APIC: { - AcpiMadtProcessorX2Apic *apic = (void *)madt_buf->data; - apic->flags = cpu_to_le32(1); - break; - } - default: - assert(0); - } + adevc->madt_cpu(adev, i, arch_ids, madt_buf, + true); /* set enabled flag */ aml_append(dev, aml_name_decl("_MAT", aml_buffer(madt_buf->len, (uint8_t *)madt_buf->data))); g_array_free(madt_buf, true); diff --git a/hw/acpi/ghes.c b/hw/acpi/ghes.c index a749b84d62..45d9a809cc 100644 --- a/hw/acpi/ghes.c +++ b/hw/acpi/ghes.c @@ -362,18 +362,16 @@ static void build_ghes_v2(GArray *table_data, int source_id, BIOSLinker *linker) void acpi_build_hest(GArray *table_data, BIOSLinker *linker, const char *oem_id, const char *oem_table_id) { - uint64_t hest_start = table_data->len; + AcpiTable table = { .sig = "HEST", .rev = 1, + .oem_id = oem_id, .oem_table_id = oem_table_id }; - /* Hardware Error Source Table header*/ - acpi_data_push(table_data, sizeof(AcpiTableHeader)); + acpi_table_begin(&table, table_data); /* Error Source Count */ build_append_int_noprefix(table_data, ACPI_GHES_ERROR_SOURCE_COUNT, 4); - build_ghes_v2(table_data, ACPI_HEST_SRC_ID_SEA, linker); - build_header(linker, table_data, (void *)(table_data->data + hest_start), - "HEST", table_data->len - hest_start, 1, oem_id, oem_table_id); + acpi_table_end(linker, &table); } void acpi_ghes_add_fw_cfg(AcpiGhesState *ags, FWCfgState *s, diff --git a/hw/acpi/hmat.c b/hw/acpi/hmat.c index edb3fd91b2..6913ebf730 100644 --- a/hw/acpi/hmat.c +++ b/hw/acpi/hmat.c @@ -200,6 +200,8 @@ static void hmat_build_table_structs(GArray *table_data, NumaState *numa_state) HMAT_LB_Info *hmat_lb; NumaHmatCacheOptions *hmat_cache; + build_append_int_noprefix(table_data, 0, 4); /* Reserved */ + for (i = 0; i < numa_state->num_nodes; i++) { flags = 0; @@ -256,14 +258,10 @@ static void hmat_build_table_structs(GArray *table_data, NumaState *numa_state) void build_hmat(GArray *table_data, BIOSLinker *linker, NumaState *numa_state, const char 
*oem_id, const char *oem_table_id) { - int hmat_start = table_data->len; - - /* reserve space for HMAT header */ - acpi_data_push(table_data, 40); + AcpiTable table = { .sig = "HMAT", .rev = 2, + .oem_id = oem_id, .oem_table_id = oem_table_id }; + acpi_table_begin(&table, table_data); hmat_build_table_structs(table_data, numa_state); - - build_header(linker, table_data, - (void *)(table_data->data + hmat_start), - "HMAT", table_data->len - hmat_start, 2, oem_id, oem_table_id); + acpi_table_end(linker, &table); } diff --git a/hw/acpi/memory_hotplug.c b/hw/acpi/memory_hotplug.c index af37889423..d0fffcf787 100644 --- a/hw/acpi/memory_hotplug.c +++ b/hw/acpi/memory_hotplug.c @@ -8,6 +8,7 @@ #include "qapi/error.h" #include "qapi/qapi-events-acpi.h" #include "qapi/qapi-events-machine.h" +#include "qapi/qapi-events-qdev.h" #define MEMORY_SLOTS_NUMBER "MDNR" #define MEMORY_HOTPLUG_IO_REGION "HPMR" @@ -178,8 +179,16 @@ static void acpi_memory_hotplug_write(void *opaque, hwaddr addr, uint64_t data, hotplug_handler_unplug(hotplug_ctrl, dev, &local_err); if (local_err) { trace_mhp_acpi_pc_dimm_delete_failed(mem_st->selector); - qapi_event_send_mem_unplug_error(dev->id, + + /* + * Send both MEM_UNPLUG_ERROR and DEVICE_UNPLUG_GUEST_ERROR + * while the deprecation of MEM_UNPLUG_ERROR is + * pending. + */ + qapi_event_send_mem_unplug_error(dev->id ? : "", error_get_pretty(local_err)); + qapi_event_send_device_unplug_guest_error(!!dev->id, dev->id, + dev->canonical_path); error_free(local_err); break; } diff --git a/hw/acpi/meson.build b/hw/acpi/meson.build index 7d8c0eb43e..adf6347bc4 100644 --- a/hw/acpi/meson.build +++ b/hw/acpi/meson.build @@ -20,6 +20,7 @@ acpi_ss.add(when: 'CONFIG_ACPI_APEI', if_true: files('ghes.c'), if_false: files( acpi_ss.add(when: 'CONFIG_ACPI_PIIX4', if_true: files('piix4.c')) acpi_ss.add(when: 'CONFIG_ACPI_PCIHP', if_true: files('pcihp.c')) acpi_ss.add(when: 'CONFIG_ACPI_PCIHP', if_false: files('acpi-pci-hotplug-stub.c')) +acpi_ss.add(when: 'CONFIG_ACPI_VIOT', if_true: files('viot.c')) acpi_ss.add(when: 'CONFIG_ACPI_X86_ICH', if_true: files('ich9.c', 'tco.c')) acpi_ss.add(when: 'CONFIG_IPMI', if_true: files('ipmi.c'), if_false: files('ipmi-stub.c')) acpi_ss.add(when: 'CONFIG_PC', if_false: files('acpi-x86-stub.c')) diff --git a/hw/acpi/nvdimm.c b/hw/acpi/nvdimm.c index e3d5fe1939..0d43da19ea 100644 --- a/hw/acpi/nvdimm.c +++ b/hw/acpi/nvdimm.c @@ -44,22 +44,6 @@ static const uint8_t nvdimm_nfit_spa_uuid[] = UUID_LE(0x66f0d379, 0xb4f3, 0x4074, 0xac, 0x43, 0x0d, 0x33, 0x18, 0xb7, 0x8c, 0xdb); -/* - * NVDIMM Firmware Interface Table - * @signature: "NFIT" - * - * It provides information that allows OSPM to enumerate NVDIMM present in - * the platform and associate system physical address ranges created by the - * NVDIMMs. - * - * It is defined in ACPI 6.0: 5.2.25 NVDIMM Firmware Interface Table (NFIT) - */ -struct NvdimmNfitHeader { - ACPI_TABLE_HEADER_DEF - uint32_t reserved; -} QEMU_PACKED; -typedef struct NvdimmNfitHeader NvdimmNfitHeader; - /* * define NFIT structures according to ACPI 6.0: 5.2.25 NVDIMM Firmware * Interface Table (NFIT). 
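For orientation: the remaining hunks in this series convert table builders to the acpi_table_begin()/acpi_table_end() pair added in aml-build.c above. A minimal sketch of the recurring pattern follows; the "DEMO" signature and the build_demo_table() name are made up purely for illustration and are not part of the patch.

/* Illustrative only -- hypothetical table, not part of this series */
static void build_demo_table(GArray *table_data, BIOSLinker *linker,
                             const char *oem_id, const char *oem_table_id)
{
    AcpiTable table = { .sig = "DEMO", .rev = 1,
                        .oem_id = oem_id, .oem_table_id = oem_table_id };

    /* appends the standard 36-byte DESCRIPTION_HEADER with a zero Length */
    acpi_table_begin(&table, table_data);

    /* table-specific payload goes here */
    build_append_int_noprefix(table_data, 0, 4);

    /* patches Length and schedules the checksum linker command */
    acpi_table_end(linker, &table);
}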
@@ -355,10 +339,10 @@ nvdimm_build_structure_caps(GArray *structures, uint32_t capabilities) static GArray *nvdimm_build_device_structure(NVDIMMState *state) { - GSList *device_list = nvdimm_get_device_list(); + GSList *device_list, *list = nvdimm_get_device_list(); GArray *structures = g_array_new(false, true /* clear */, 1); - for (; device_list; device_list = device_list->next) { + for (device_list = list; device_list; device_list = device_list->next) { DeviceState *dev = device_list->data; /* build System Physical Address Range Structure. */ @@ -373,7 +357,7 @@ static GArray *nvdimm_build_device_structure(NVDIMMState *state) /* build NVDIMM Control Region Structure. */ nvdimm_build_structure_dcr(structures, dev); } - g_slist_free(device_list); + g_slist_free(list); if (state->persistence) { nvdimm_build_structure_caps(structures, state->persistence); @@ -401,25 +385,33 @@ void nvdimm_plug(NVDIMMState *state) nvdimm_build_fit_buffer(state); } +/* + * NVDIMM Firmware Interface Table + * @signature: "NFIT" + * + * It provides information that allows OSPM to enumerate NVDIMM present in + * the platform and associate system physical address ranges created by the + * NVDIMMs. + * + * It is defined in ACPI 6.0: 5.2.25 NVDIMM Firmware Interface Table (NFIT) + */ + static void nvdimm_build_nfit(NVDIMMState *state, GArray *table_offsets, GArray *table_data, BIOSLinker *linker, const char *oem_id, const char *oem_table_id) { NvdimmFitBuffer *fit_buf = &state->fit_buf; - unsigned int header; + AcpiTable table = { .sig = "NFIT", .rev = 1, + .oem_id = oem_id, .oem_table_id = oem_table_id }; acpi_add_table(table_offsets, table_data); - /* NFIT header. */ - header = table_data->len; - acpi_data_push(table_data, sizeof(NvdimmNfitHeader)); + acpi_table_begin(&table, table_data); + /* Reserved */ + build_append_int_noprefix(table_data, 0, 4); /* NVDIMM device structures. 
*/ g_array_append_vals(table_data, fit_buf->fit->data, fit_buf->fit->len); - - build_header(linker, table_data, - (void *)(table_data->data + header), "NFIT", - sizeof(NvdimmNfitHeader) + fit_buf->fit->len, 1, oem_id, - oem_table_id); + acpi_table_end(linker, &table); } #define NVDIMM_DSM_MEMORY_SIZE 4096 @@ -1282,14 +1274,15 @@ static void nvdimm_build_ssdt(GArray *table_offsets, GArray *table_data, NVDIMMState *nvdimm_state, uint32_t ram_slots, const char *oem_id) { + int mem_addr_offset; Aml *ssdt, *sb_scope, *dev; - int mem_addr_offset, nvdimm_ssdt; + AcpiTable table = { .sig = "SSDT", .rev = 1, + .oem_id = oem_id, .oem_table_id = "NVDIMM" }; acpi_add_table(table_offsets, table_data); + acpi_table_begin(&table, table_data); ssdt = init_aml_allocator(); - acpi_data_push(ssdt->buf, sizeof(AcpiTableHeader)); - sb_scope = aml_scope("\\_SB"); dev = aml_device("NVDR"); @@ -1318,8 +1311,6 @@ static void nvdimm_build_ssdt(GArray *table_offsets, GArray *table_data, aml_append(sb_scope, dev); aml_append(ssdt, sb_scope); - nvdimm_ssdt = table_data->len; - /* copy AML table into ACPI tables blob and patch header there */ g_array_append_vals(table_data, ssdt->buf->data, ssdt->buf->len); mem_addr_offset = build_append_named_dword(table_data, @@ -1331,18 +1322,20 @@ static void nvdimm_build_ssdt(GArray *table_offsets, GArray *table_data, bios_linker_loader_add_pointer(linker, ACPI_BUILD_TABLE_FILE, mem_addr_offset, sizeof(uint32_t), NVDIMM_DSM_MEM_FILE, 0); - build_header(linker, table_data, - (void *)(table_data->data + nvdimm_ssdt), - "SSDT", table_data->len - nvdimm_ssdt, 1, oem_id, "NVDIMM"); free_aml_allocator(); + /* + * must be called last so that the pointer patching command above is + * executed by the guest before the checksum scheduled by + * acpi_table_end() is recalculated + */ + acpi_table_end(linker, &table); } void nvdimm_build_srat(GArray *table_data) { - GSList *device_list = nvdimm_get_device_list(); + GSList *device_list, *list = nvdimm_get_device_list(); - for (; device_list; device_list = device_list->next) { - AcpiSratMemoryAffinity *numamem = NULL; + for (device_list = list; device_list; device_list = device_list->next) { DeviceState *dev = device_list->data; Object *obj = OBJECT(dev); uint64_t addr, size; @@ -1352,11 +1345,10 @@ void nvdimm_build_srat(GArray *table_data) addr = object_property_get_uint(obj, PC_DIMM_ADDR_PROP, &error_abort); size = object_property_get_uint(obj, PC_DIMM_SIZE_PROP, &error_abort); - numamem = acpi_data_push(table_data, sizeof *numamem); - build_srat_memory(numamem, addr, size, node, + build_srat_memory(table_data, addr, size, node, MEM_AFFINITY_ENABLED | MEM_AFFINITY_NON_VOLATILE); } - g_slist_free(device_list); + g_slist_free(list); } void nvdimm_build_acpi(GArray *table_offsets, GArray *table_data, diff --git a/hw/acpi/pci.c b/hw/acpi/pci.c index 75b1103ec4..20b70dcd81 100644 --- a/hw/acpi/pci.c +++ b/hw/acpi/pci.c @@ -28,19 +28,20 @@ #include "hw/acpi/pci.h" #include "hw/pci/pcie_host.h" +/* + * PCI Firmware Specification, Revision 3.0 + * 4.1.2 MCFG Table Description. + */ void build_mcfg(GArray *table_data, BIOSLinker *linker, AcpiMcfgInfo *info, const char *oem_id, const char *oem_table_id) { - int mcfg_start = table_data->len; + AcpiTable table = { .sig = "MCFG", .rev = 1, + .oem_id = oem_id, .oem_table_id = oem_table_id }; + + acpi_table_begin(&table, table_data); - /* - * PCI Firmware Specification, Revision 3.0 - * 4.1.2 MCFG Table Description.
- */ - acpi_data_push(table_data, sizeof(AcpiTableHeader)); /* Reserved */ build_append_int_noprefix(table_data, 0, 8); - /* * Memory Mapped Enhanced Configuration Space Base Address Allocation * Structure @@ -56,6 +57,5 @@ void build_mcfg(GArray *table_data, BIOSLinker *linker, AcpiMcfgInfo *info, /* Reserved */ build_append_int_noprefix(table_data, 0, 4); - build_header(linker, table_data, (void *)(table_data->data + mcfg_start), - "MCFG", table_data->len - mcfg_start, 1, oem_id, oem_table_id); + acpi_table_end(linker, &table); } diff --git a/hw/acpi/viot.c b/hw/acpi/viot.c new file mode 100644 index 0000000000..c1af75206e --- /dev/null +++ b/hw/acpi/viot.c @@ -0,0 +1,114 @@ +/* + * ACPI Virtual I/O Translation table implementation + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ +#include "qemu/osdep.h" +#include "hw/acpi/acpi.h" +#include "hw/acpi/aml-build.h" +#include "hw/acpi/viot.h" +#include "hw/pci/pci.h" +#include "hw/pci/pci_host.h" + +struct viot_pci_ranges { + GArray *blob; + size_t count; + uint16_t output_node; +}; + +/* Build PCI range for a given PCI host bridge */ +static int build_pci_range_node(Object *obj, void *opaque) +{ + struct viot_pci_ranges *pci_ranges = opaque; + GArray *blob = pci_ranges->blob; + + if (object_dynamic_cast(obj, TYPE_PCI_HOST_BRIDGE)) { + PCIBus *bus = PCI_HOST_BRIDGE(obj)->bus; + + if (bus && !pci_bus_bypass_iommu(bus)) { + int min_bus, max_bus; + + pci_bus_range(bus, &min_bus, &max_bus); + + /* Type */ + build_append_int_noprefix(blob, 1 /* PCI range */, 1); + /* Reserved */ + build_append_int_noprefix(blob, 0, 1); + /* Length */ + build_append_int_noprefix(blob, 24, 2); + /* Endpoint start */ + build_append_int_noprefix(blob, PCI_BUILD_BDF(min_bus, 0), 4); + /* PCI Segment start */ + build_append_int_noprefix(blob, 0, 2); + /* PCI Segment end */ + build_append_int_noprefix(blob, 0, 2); + /* PCI BDF start */ + build_append_int_noprefix(blob, PCI_BUILD_BDF(min_bus, 0), 2); + /* PCI BDF end */ + build_append_int_noprefix(blob, PCI_BUILD_BDF(max_bus, 0xff), 2); + /* Output node */ + build_append_int_noprefix(blob, pci_ranges->output_node, 2); + /* Reserved */ + build_append_int_noprefix(blob, 0, 6); + + pci_ranges->count++; + } + } + + return 0; +} + +/* + * Generate a VIOT table with one PCI-based virtio-iommu that manages PCI + * endpoints. 
+ * + * Defined in the ACPI Specification (Version TBD) + */ +void build_viot(MachineState *ms, GArray *table_data, BIOSLinker *linker, + uint16_t virtio_iommu_bdf, const char *oem_id, + const char *oem_table_id) +{ + /* The virtio-iommu node follows the 48-bytes header */ + int viommu_off = 48; + AcpiTable table = { .sig = "VIOT", .rev = 0, + .oem_id = oem_id, .oem_table_id = oem_table_id }; + struct viot_pci_ranges pci_ranges = { + .output_node = viommu_off, + .blob = g_array_new(false, true /* clear */, 1), + }; + + /* Build the list of PCI ranges that this viommu manages */ + object_child_foreach_recursive(OBJECT(ms), build_pci_range_node, + &pci_ranges); + + /* ACPI table header */ + acpi_table_begin(&table, table_data); + /* Node count */ + build_append_int_noprefix(table_data, pci_ranges.count + 1, 2); + /* Node offset */ + build_append_int_noprefix(table_data, viommu_off, 2); + /* Reserved */ + build_append_int_noprefix(table_data, 0, 8); + + /* Virtio-iommu node */ + /* Type */ + build_append_int_noprefix(table_data, 3 /* virtio-pci IOMMU */, 1); + /* Reserved */ + build_append_int_noprefix(table_data, 0, 1); + /* Length */ + build_append_int_noprefix(table_data, 16, 2); + /* PCI Segment */ + build_append_int_noprefix(table_data, 0, 2); + /* PCI BDF number */ + build_append_int_noprefix(table_data, virtio_iommu_bdf, 2); + /* Reserved */ + build_append_int_noprefix(table_data, 0, 8); + + /* PCI ranges found above */ + g_array_append_vals(table_data, pci_ranges.blob->data, + pci_ranges.blob->len); + g_array_free(pci_ranges.blob, true); + + acpi_table_end(linker, &table); +} + diff --git a/hw/acpi/viot.h b/hw/acpi/viot.h new file mode 100644 index 0000000000..9fe565bb87 --- /dev/null +++ b/hw/acpi/viot.h @@ -0,0 +1,13 @@ +/* + * ACPI Virtual I/O Translation Table implementation + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ +#ifndef VIOT_H +#define VIOT_H + +void build_viot(MachineState *ms, GArray *table_data, BIOSLinker *linker, + uint16_t virtio_iommu_bdf, const char *oem_id, + const char *oem_table_id); + +#endif /* VIOT_H */ diff --git a/hw/acpi/vmgenid.c b/hw/acpi/vmgenid.c index 4f41a13ea0..0c9f158ac9 100644 --- a/hw/acpi/vmgenid.c +++ b/hw/acpi/vmgenid.c @@ -29,6 +29,8 @@ void vmgenid_build_acpi(VmGenIdState *vms, GArray *table_data, GArray *guid, Aml *ssdt, *dev, *scope, *method, *addr, *if_ctx; uint32_t vgia_offset; QemuUUID guid_le; + AcpiTable table = { .sig = "SSDT", .rev = 1, + .oem_id = oem_id, .oem_table_id = "VMGENID" }; /* Fill in the GUID values. 
These need to be converted to little-endian * first, since that's what the guest expects @@ -42,12 +44,10 @@ void vmgenid_build_acpi(VmGenIdState *vms, GArray *table_data, GArray *guid, g_array_insert_vals(guid, VMGENID_GUID_OFFSET, guid_le.data, ARRAY_SIZE(guid_le.data)); - /* Put this in a separate SSDT table */ + /* Put VMGENID into a separate SSDT table */ + acpi_table_begin(&table, table_data); ssdt = init_aml_allocator(); - /* Reserve space for header */ - acpi_data_push(ssdt->buf, sizeof(AcpiTableHeader)); - /* Storage for the GUID address */ vgia_offset = table_data->len + build_append_named_dword(ssdt->buf, "VGIA"); @@ -116,9 +116,8 @@ void vmgenid_build_acpi(VmGenIdState *vms, GArray *table_data, GArray *guid, ACPI_BUILD_TABLE_FILE, vgia_offset, sizeof(uint32_t), VMGENID_GUID_FW_CFG_FILE, 0); - build_header(linker, table_data, - (void *)(table_data->data + table_data->len - ssdt->buf->len), - "SSDT", ssdt->buf->len, 1, oem_id, "VMGENID"); + /* must be called after above command to ensure correct table checksum */ + acpi_table_end(linker, &table); free_aml_allocator(); } diff --git a/hw/adc/aspeed_adc.c b/hw/adc/aspeed_adc.c new file mode 100644 index 0000000000..c5fcae29f6 --- /dev/null +++ b/hw/adc/aspeed_adc.c @@ -0,0 +1,427 @@ +/* + * Aspeed ADC + * + * Copyright 2017-2021 IBM Corp. + * + * Andrew Jeffery + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu/log.h" +#include "hw/irq.h" +#include "hw/qdev-properties.h" +#include "migration/vmstate.h" +#include "hw/adc/aspeed_adc.h" +#include "trace.h" + +#define ASPEED_ADC_MEMORY_REGION_SIZE 0x1000 +#define ASPEED_ADC_ENGINE_MEMORY_REGION_SIZE 0x100 +#define ASPEED_ADC_ENGINE_CH_EN_MASK 0xffff0000 +#define ASPEED_ADC_ENGINE_CH_EN(x) ((BIT(x)) << 16) +#define ASPEED_ADC_ENGINE_INIT BIT(8) +#define ASPEED_ADC_ENGINE_AUTO_COMP BIT(5) +#define ASPEED_ADC_ENGINE_COMP BIT(4) +#define ASPEED_ADC_ENGINE_MODE_MASK 0x0000000e +#define ASPEED_ADC_ENGINE_MODE_OFF (0b000 << 1) +#define ASPEED_ADC_ENGINE_MODE_STANDBY (0b001 << 1) +#define ASPEED_ADC_ENGINE_MODE_NORMAL (0b111 << 1) +#define ASPEED_ADC_ENGINE_EN BIT(0) +#define ASPEED_ADC_HYST_EN BIT(31) + +#define ASPEED_ADC_L_MASK ((1 << 10) - 1) +#define ASPEED_ADC_L(x) ((x) & ASPEED_ADC_L_MASK) +#define ASPEED_ADC_H(x) (((x) >> 16) & ASPEED_ADC_L_MASK) +#define ASPEED_ADC_LH_MASK (ASPEED_ADC_L_MASK << 16 | ASPEED_ADC_L_MASK) +#define LOWER_CHANNEL_MASK ((1 << 10) - 1) +#define LOWER_CHANNEL_DATA(x) ((x) & LOWER_CHANNEL_MASK) +#define UPPER_CHANNEL_DATA(x) (((x) >> 16) & LOWER_CHANNEL_MASK) + +#define TO_REG(addr) (addr >> 2) + +#define ENGINE_CONTROL TO_REG(0x00) +#define INTERRUPT_CONTROL TO_REG(0x04) +#define VGA_DETECT_CONTROL TO_REG(0x08) +#define CLOCK_CONTROL TO_REG(0x0C) +#define DATA_CHANNEL_1_AND_0 TO_REG(0x10) +#define DATA_CHANNEL_7_AND_6 TO_REG(0x1C) +#define DATA_CHANNEL_9_AND_8 TO_REG(0x20) +#define DATA_CHANNEL_15_AND_14 TO_REG(0x2C) +#define BOUNDS_CHANNEL_0 TO_REG(0x30) +#define BOUNDS_CHANNEL_7 TO_REG(0x4C) +#define BOUNDS_CHANNEL_8 TO_REG(0x50) +#define BOUNDS_CHANNEL_15 TO_REG(0x6C) +#define HYSTERESIS_CHANNEL_0 TO_REG(0x70) +#define HYSTERESIS_CHANNEL_7 TO_REG(0x8C) +#define HYSTERESIS_CHANNEL_8 TO_REG(0x90) +#define HYSTERESIS_CHANNEL_15 TO_REG(0xAC) +#define INTERRUPT_SOURCE TO_REG(0xC0) +#define COMPENSATING_AND_TRIMMING TO_REG(0xC4) + +static inline uint32_t update_channels(uint32_t current) +{ + return ((((current >> 16) & ASPEED_ADC_L_MASK) + 7) << 16) | + ((current + 5) & ASPEED_ADC_L_MASK); +}
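An aside on the data-register layout modelled above: each DATA_CHANNEL_* word packs two 10-bit conversions, the even-numbered channel in bits [9:0] and the odd-numbered channel in bits [25:16], which is what the ASPEED_ADC_L()/ASPEED_ADC_H() macros extract. A standalone sketch of that unpacking, with made-up register contents:

#include <stdint.h>
#include <stdio.h>

#define ADC_SAMPLE_MASK ((1u << 10) - 1)   /* same 10-bit mask as ASPEED_ADC_L_MASK */

int main(void)
{
    uint32_t reg = (0x155u << 16) | 0x2aa;          /* illustrative packed value */
    uint32_t even = reg & ADC_SAMPLE_MASK;          /* bits 9:0,  channel 2n */
    uint32_t odd  = (reg >> 16) & ADC_SAMPLE_MASK;  /* bits 25:16, channel 2n+1 */

    printf("even=0x%03x odd=0x%03x\n", even, odd);  /* prints even=0x2aa odd=0x155 */
    return 0;
}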
+ +static bool breaks_threshold(AspeedADCEngineState *s, int reg) +{ + assert(reg >= DATA_CHANNEL_1_AND_0 && + reg < DATA_CHANNEL_1_AND_0 + s->nr_channels / 2); + + int a_bounds_reg = BOUNDS_CHANNEL_0 + (reg - DATA_CHANNEL_1_AND_0) * 2; + int b_bounds_reg = a_bounds_reg + 1; + uint32_t a_and_b = s->regs[reg]; + uint32_t a_bounds = s->regs[a_bounds_reg]; + uint32_t b_bounds = s->regs[b_bounds_reg]; + uint32_t a = ASPEED_ADC_L(a_and_b); + uint32_t b = ASPEED_ADC_H(a_and_b); + uint32_t a_lower = ASPEED_ADC_L(a_bounds); + uint32_t a_upper = ASPEED_ADC_H(a_bounds); + uint32_t b_lower = ASPEED_ADC_L(b_bounds); + uint32_t b_upper = ASPEED_ADC_H(b_bounds); + + return (a < a_lower || a > a_upper) || + (b < b_lower || b > b_upper); +} + +static uint32_t read_channel_sample(AspeedADCEngineState *s, int reg) +{ + assert(reg >= DATA_CHANNEL_1_AND_0 && + reg < DATA_CHANNEL_1_AND_0 + s->nr_channels / 2); + + /* Poor man's sampling */ + uint32_t value = s->regs[reg]; + s->regs[reg] = update_channels(s->regs[reg]); + + if (breaks_threshold(s, reg)) { + s->regs[INTERRUPT_CONTROL] |= BIT(reg - DATA_CHANNEL_1_AND_0); + qemu_irq_raise(s->irq); + } + + return value; +} + +static uint64_t aspeed_adc_engine_read(void *opaque, hwaddr addr, + unsigned int size) +{ + AspeedADCEngineState *s = ASPEED_ADC_ENGINE(opaque); + int reg = TO_REG(addr); + uint32_t value = 0; + + switch (reg) { + case BOUNDS_CHANNEL_8 ... BOUNDS_CHANNEL_15: + if (s->nr_channels <= 8) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: engine[%u]: " + "bounds register %u invalid, only 0...7 valid\n", + __func__, s->engine_id, reg - BOUNDS_CHANNEL_0); + break; + } + /* fallthrough */ + case HYSTERESIS_CHANNEL_8 ... HYSTERESIS_CHANNEL_15: + if (s->nr_channels <= 8) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: engine[%u]: " + "hysteresis register %u invalid, only 0...7 valid\n", + __func__, s->engine_id, reg - HYSTERESIS_CHANNEL_0); + break; + } + /* fallthrough */ + case BOUNDS_CHANNEL_0 ... BOUNDS_CHANNEL_7: + case HYSTERESIS_CHANNEL_0 ... HYSTERESIS_CHANNEL_7: + case ENGINE_CONTROL: + case INTERRUPT_CONTROL: + case VGA_DETECT_CONTROL: + case CLOCK_CONTROL: + case INTERRUPT_SOURCE: + case COMPENSATING_AND_TRIMMING: + value = s->regs[reg]; + break; + case DATA_CHANNEL_9_AND_8 ... DATA_CHANNEL_15_AND_14: + if (s->nr_channels <= 8) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: engine[%u]: " + "data register %u invalid, only 0...3 valid\n", + __func__, s->engine_id, reg - DATA_CHANNEL_1_AND_0); + break; + } + /* fallthrough */ + case DATA_CHANNEL_1_AND_0 ... DATA_CHANNEL_7_AND_6: + value = read_channel_sample(s, reg); + /* Allow 16-bit reads of the data registers */ + if (addr & 0x2) { + assert(size == 2); + value >>= 16; + } + break; + default: + qemu_log_mask(LOG_UNIMP, "%s: engine[%u]: 0x%" HWADDR_PRIx "\n", + __func__, s->engine_id, addr); + break; + } + + trace_aspeed_adc_engine_read(s->engine_id, addr, value); + return value; +} + +static void aspeed_adc_engine_write(void *opaque, hwaddr addr, uint64_t value, + unsigned int size) +{ + AspeedADCEngineState *s = ASPEED_ADC_ENGINE(opaque); + int reg = TO_REG(addr); + uint32_t init = 0; + + trace_aspeed_adc_engine_write(s->engine_id, addr, value); + + switch (reg) { + case ENGINE_CONTROL: + init = !!(value & ASPEED_ADC_ENGINE_EN); + init *= ASPEED_ADC_ENGINE_INIT; + + value &= ~ASPEED_ADC_ENGINE_INIT; + value |= init; + + value &= ~ASPEED_ADC_ENGINE_AUTO_COMP; + break; + case INTERRUPT_CONTROL: + case VGA_DETECT_CONTROL: + case CLOCK_CONTROL: + break; + case DATA_CHANNEL_9_AND_8 ... 
DATA_CHANNEL_15_AND_14: + if (s->nr_channels <= 8) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: engine[%u]: " + "data register %u invalid, only 0...3 valid\n", + __func__, s->engine_id, reg - DATA_CHANNEL_1_AND_0); + return; + } + /* fallthrough */ + case BOUNDS_CHANNEL_8 ... BOUNDS_CHANNEL_15: + if (s->nr_channels <= 8) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: engine[%u]: " + "bounds register %u invalid, only 0...7 valid\n", + __func__, s->engine_id, reg - BOUNDS_CHANNEL_0); + return; + } + /* fallthrough */ + case DATA_CHANNEL_1_AND_0 ... DATA_CHANNEL_7_AND_6: + case BOUNDS_CHANNEL_0 ... BOUNDS_CHANNEL_7: + value &= ASPEED_ADC_LH_MASK; + break; + case HYSTERESIS_CHANNEL_8 ... HYSTERESIS_CHANNEL_15: + if (s->nr_channels <= 8) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: engine[%u]: " + "hysteresis register %u invalid, only 0...7 valid\n", + __func__, s->engine_id, reg - HYSTERESIS_CHANNEL_0); + return; + } + /* fallthrough */ + case HYSTERESIS_CHANNEL_0 ... HYSTERESIS_CHANNEL_7: + value &= (ASPEED_ADC_HYST_EN | ASPEED_ADC_LH_MASK); + break; + case INTERRUPT_SOURCE: + value &= 0xffff; + break; + case COMPENSATING_AND_TRIMMING: + value &= 0xf; + break; + default: + qemu_log_mask(LOG_UNIMP, "%s: engine[%u]: " + "0x%" HWADDR_PRIx " 0x%" PRIx64 "\n", + __func__, s->engine_id, addr, value); + break; + } + + s->regs[reg] = value; +} + +static const MemoryRegionOps aspeed_adc_engine_ops = { + .read = aspeed_adc_engine_read, + .write = aspeed_adc_engine_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = { + .min_access_size = 2, + .max_access_size = 4, + .unaligned = false, + }, +}; + +static const uint32_t aspeed_adc_resets[ASPEED_ADC_NR_REGS] = { + [ENGINE_CONTROL] = 0x00000000, + [INTERRUPT_CONTROL] = 0x00000000, + [VGA_DETECT_CONTROL] = 0x0000000f, + [CLOCK_CONTROL] = 0x0000000f, +}; + +static void aspeed_adc_engine_reset(DeviceState *dev) +{ + AspeedADCEngineState *s = ASPEED_ADC_ENGINE(dev); + + memcpy(s->regs, aspeed_adc_resets, sizeof(aspeed_adc_resets)); +} + +static void aspeed_adc_engine_realize(DeviceState *dev, Error **errp) +{ + AspeedADCEngineState *s = ASPEED_ADC_ENGINE(dev); + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + g_autofree char *name = g_strdup_printf(TYPE_ASPEED_ADC_ENGINE ".%d", + s->engine_id); + + assert(s->engine_id < 2); + + sysbus_init_irq(sbd, &s->irq); + + memory_region_init_io(&s->mmio, OBJECT(s), &aspeed_adc_engine_ops, s, name, + ASPEED_ADC_ENGINE_MEMORY_REGION_SIZE); + + sysbus_init_mmio(sbd, &s->mmio); +} + +static const VMStateDescription vmstate_aspeed_adc_engine = { + .name = TYPE_ASPEED_ADC, + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32_ARRAY(regs, AspeedADCEngineState, ASPEED_ADC_NR_REGS), + VMSTATE_END_OF_LIST(), + } +}; + +static Property aspeed_adc_engine_properties[] = { + DEFINE_PROP_UINT32("engine-id", AspeedADCEngineState, engine_id, 0), + DEFINE_PROP_UINT32("nr-channels", AspeedADCEngineState, nr_channels, 0), + DEFINE_PROP_END_OF_LIST(), +}; + +static void aspeed_adc_engine_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = aspeed_adc_engine_realize; + dc->reset = aspeed_adc_engine_reset; + device_class_set_props(dc, aspeed_adc_engine_properties); + dc->desc = "Aspeed Analog-to-Digital Engine"; + dc->vmsd = &vmstate_aspeed_adc_engine; +} + +static const TypeInfo aspeed_adc_engine_info = { + .name = TYPE_ASPEED_ADC_ENGINE, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(AspeedADCEngineState), + .class_init = aspeed_adc_engine_class_init, +}; 
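Worth noting before the container code below: the abstract TYPE_ASPEED_ADC parent owns a 0x1000 MMIO region and, in aspeed_adc_realize() further down, maps each engine's 0x100 register bank at engine_id * ASPEED_ADC_ENGINE_MEMORY_REGION_SIZE. A standalone sketch of that offset arithmetic; the helper name is hypothetical and the two-engine case corresponds to the AST2600 class:

#include <stdint.h>
#include <stdio.h>

#define ENGINE_REGION_SIZE 0x100u   /* ASPEED_ADC_ENGINE_MEMORY_REGION_SIZE */

/* Offset of an engine register within the ADC controller's MMIO container */
static uint32_t adc_engine_reg_offset(uint32_t engine_id, uint32_t reg)
{
    return engine_id * ENGINE_REGION_SIZE + reg;
}

int main(void)
{
    /* On a two-engine controller, engine 1's ENGINE_CONTROL sits at +0x100 */
    printf("0x%03x\n", adc_engine_reg_offset(1, 0x00));
    return 0;
}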
+ +static void aspeed_adc_instance_init(Object *obj) +{ + AspeedADCState *s = ASPEED_ADC(obj); + AspeedADCClass *aac = ASPEED_ADC_GET_CLASS(obj); + uint32_t nr_channels = ASPEED_ADC_NR_CHANNELS / aac->nr_engines; + + for (int i = 0; i < aac->nr_engines; i++) { + AspeedADCEngineState *engine = &s->engines[i]; + object_initialize_child(obj, "engine[*]", engine, + TYPE_ASPEED_ADC_ENGINE); + qdev_prop_set_uint32(DEVICE(engine), "engine-id", i); + qdev_prop_set_uint32(DEVICE(engine), "nr-channels", nr_channels); + } +} + +static void aspeed_adc_set_irq(void *opaque, int n, int level) +{ + AspeedADCState *s = opaque; + AspeedADCClass *aac = ASPEED_ADC_GET_CLASS(s); + uint32_t pending = 0; + + /* TODO: update Global IRQ status register on AST2600 (Need specs) */ + for (int i = 0; i < aac->nr_engines; i++) { + uint32_t irq_status = s->engines[i].regs[INTERRUPT_CONTROL] & 0xFF; + pending |= irq_status << (i * 8); + } + + qemu_set_irq(s->irq, !!pending); +} + +static void aspeed_adc_realize(DeviceState *dev, Error **errp) +{ + AspeedADCState *s = ASPEED_ADC(dev); + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + AspeedADCClass *aac = ASPEED_ADC_GET_CLASS(dev); + + qdev_init_gpio_in_named_with_opaque(DEVICE(sbd), aspeed_adc_set_irq, + s, NULL, aac->nr_engines); + + sysbus_init_irq(sbd, &s->irq); + + memory_region_init(&s->mmio, OBJECT(s), TYPE_ASPEED_ADC, + ASPEED_ADC_MEMORY_REGION_SIZE); + + sysbus_init_mmio(sbd, &s->mmio); + + for (int i = 0; i < aac->nr_engines; i++) { + Object *eng = OBJECT(&s->engines[i]); + + if (!sysbus_realize(SYS_BUS_DEVICE(eng), errp)) { + return; + } + sysbus_connect_irq(SYS_BUS_DEVICE(eng), 0, + qdev_get_gpio_in(DEVICE(sbd), i)); + memory_region_add_subregion(&s->mmio, + i * ASPEED_ADC_ENGINE_MEMORY_REGION_SIZE, + &s->engines[i].mmio); + } +} + +static void aspeed_adc_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + AspeedADCClass *aac = ASPEED_ADC_CLASS(klass); + + dc->realize = aspeed_adc_realize; + dc->desc = "Aspeed Analog-to-Digital Converter"; + aac->nr_engines = 1; +} + +static void aspeed_2600_adc_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + AspeedADCClass *aac = ASPEED_ADC_CLASS(klass); + + dc->desc = "ASPEED 2600 ADC Controller"; + aac->nr_engines = 2; +} + +static const TypeInfo aspeed_adc_info = { + .name = TYPE_ASPEED_ADC, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_init = aspeed_adc_instance_init, + .instance_size = sizeof(AspeedADCState), + .class_init = aspeed_adc_class_init, + .class_size = sizeof(AspeedADCClass), + .abstract = true, +}; + +static const TypeInfo aspeed_2400_adc_info = { + .name = TYPE_ASPEED_2400_ADC, + .parent = TYPE_ASPEED_ADC, +}; + +static const TypeInfo aspeed_2500_adc_info = { + .name = TYPE_ASPEED_2500_ADC, + .parent = TYPE_ASPEED_ADC, +}; + +static const TypeInfo aspeed_2600_adc_info = { + .name = TYPE_ASPEED_2600_ADC, + .parent = TYPE_ASPEED_ADC, + .class_init = aspeed_2600_adc_class_init, +}; + +static void aspeed_adc_register_types(void) +{ + type_register_static(&aspeed_adc_engine_info); + type_register_static(&aspeed_adc_info); + type_register_static(&aspeed_2400_adc_info); + type_register_static(&aspeed_2500_adc_info); + type_register_static(&aspeed_2600_adc_info); +} + +type_init(aspeed_adc_register_types); diff --git a/hw/adc/meson.build b/hw/adc/meson.build index ac4f093fea..b29ac7ccdf 100644 --- a/hw/adc/meson.build +++ b/hw/adc/meson.build @@ -1,4 +1,5 @@ softmmu_ss.add(when: 'CONFIG_STM32F2XX_ADC', if_true: files('stm32f2xx_adc.c')) 
+softmmu_ss.add(when: 'CONFIG_ASPEED_SOC', if_true: files('aspeed_adc.c')) softmmu_ss.add(when: 'CONFIG_NPCM7XX', if_true: files('npcm7xx_adc.c')) softmmu_ss.add(when: 'CONFIG_ZYNQ', if_true: files('zynq-xadc.c')) softmmu_ss.add(when: 'CONFIG_MAX111X', if_true: files('max111x.c')) diff --git a/hw/adc/trace-events b/hw/adc/trace-events index 456f21c8f4..5a4c444d77 100644 --- a/hw/adc/trace-events +++ b/hw/adc/trace-events @@ -3,3 +3,6 @@ # npcm7xx_adc.c npcm7xx_adc_read(const char *id, uint64_t offset, uint32_t value) " %s offset: 0x%04" PRIx64 " value 0x%04" PRIx32 npcm7xx_adc_write(const char *id, uint64_t offset, uint32_t value) "%s offset: 0x%04" PRIx64 " value 0x%04" PRIx32 + +aspeed_adc_engine_read(uint32_t engine_id, uint64_t addr, uint64_t value) "engine[%u] 0x%" PRIx64 " 0x%" PRIx64 +aspeed_adc_engine_write(uint32_t engine_id, uint64_t addr, uint64_t value) "engine[%u] 0x%" PRIx64 " 0x%" PRIx64 diff --git a/hw/arm/Kconfig b/hw/arm/Kconfig index 18832abf7d..2d37d29f02 100644 --- a/hw/arm/Kconfig +++ b/hw/arm/Kconfig @@ -381,6 +381,8 @@ config XLNX_VERSAL select XLNX_ZDMA select XLNX_ZYNQMP select OR_IRQ + select XLNX_BBRAM + select XLNX_EFUSE_VERSAL config NPCM7XX bool diff --git a/hw/arm/allwinner-h3.c b/hw/arm/allwinner-h3.c index 27f1070145..f9b7ed1871 100644 --- a/hw/arm/allwinner-h3.c +++ b/hw/arm/allwinner-h3.c @@ -237,7 +237,7 @@ static void allwinner_h3_realize(DeviceState *dev, Error **errp) /* Provide Power State Coordination Interface */ qdev_prop_set_int32(DEVICE(&s->cpus[i]), "psci-conduit", - QEMU_PSCI_CONDUIT_HVC); + QEMU_PSCI_CONDUIT_SMC); /* Disable secondary CPUs */ qdev_prop_set_bit(DEVICE(&s->cpus[i]), "start-powered-off", diff --git a/hw/arm/aspeed.c b/hw/arm/aspeed.c index ba5f1dc5af..a77f46b3ad 100644 --- a/hw/arm/aspeed.c +++ b/hw/arm/aspeed.c @@ -131,6 +131,21 @@ struct AspeedMachineState { SCU_HW_STRAP_VGA_SIZE_SET(VGA_64M_DRAM) | \ SCU_AST2500_HW_STRAP_RESERVED1) +/* FP5280G2 hardware value: 0XF100D286 */ +#define FP5280G2_BMC_HW_STRAP1 ( \ + SCU_AST2500_HW_STRAP_SPI_AUTOFETCH_ENABLE | \ + SCU_AST2500_HW_STRAP_GPIO_STRAP_ENABLE | \ + SCU_AST2500_HW_STRAP_UART_DEBUG | \ + SCU_AST2500_HW_STRAP_RESERVED28 | \ + SCU_AST2500_HW_STRAP_DDR4_ENABLE | \ + SCU_HW_STRAP_VGA_CLASS_CODE | \ + SCU_HW_STRAP_LPC_RESET_PIN | \ + SCU_HW_STRAP_SPI_MODE(SCU_HW_STRAP_SPI_MASTER) | \ + SCU_AST2500_HW_STRAP_SET_AXI_AHB_RATIO(AXI_AHB_RATIO_2_1) | \ + SCU_HW_STRAP_MAC1_RGMII | \ + SCU_HW_STRAP_VGA_SIZE_SET(VGA_16M_DRAM) | \ + SCU_AST2500_HW_STRAP_RESERVED1) + /* Witherspoon hardware value: 0xF10AD216 (but use romulus definition) */ #define WITHERSPOON_BMC_HW_STRAP1 ROMULUS_BMC_HW_STRAP1 @@ -274,18 +289,17 @@ static void aspeed_board_init_flashes(AspeedSMCState *s, int i ; for (i = 0; i < s->num_cs; ++i) { - AspeedSMCFlash *fl = &s->flashes[i]; DriveInfo *dinfo = drive_get_next(IF_MTD); qemu_irq cs_line; + DeviceState *dev; - fl->flash = qdev_new(flashtype); + dev = qdev_new(flashtype); if (dinfo) { - qdev_prop_set_drive(fl->flash, "drive", - blk_by_legacy_dinfo(dinfo)); + qdev_prop_set_drive(dev, "drive", blk_by_legacy_dinfo(dinfo)); } - qdev_realize_and_unref(fl->flash, BUS(s->spi), &error_fatal); + qdev_realize_and_unref(dev, BUS(s->spi), &error_fatal); - cs_line = qdev_get_gpio_in_named(fl->flash, SSI_GPIO_CS, 0); + cs_line = qdev_get_gpio_in_named(dev, SSI_GPIO_CS, 0); sysbus_connect_irq(SYS_BUS_DEVICE(s), i + 1, cs_line); } } @@ -377,6 +391,7 @@ static void aspeed_machine_init(MachineState *machine) if (drive0) { AspeedSMCFlash *fl = &bmc->soc.fmc.flashes[0]; MemoryRegion 
*boot_rom = g_new(MemoryRegion, 1); + uint64_t size = memory_region_size(&fl->mmio); /* * create a ROM region using the default mapping window size of @@ -386,15 +401,15 @@ static void aspeed_machine_init(MachineState *machine) */ if (ASPEED_MACHINE(machine)->mmio_exec) { memory_region_init_alias(boot_rom, NULL, "aspeed.boot_rom", - &fl->mmio, 0, fl->size); + &fl->mmio, 0, size); memory_region_add_subregion(get_system_memory(), FIRMWARE_ADDR, boot_rom); } else { memory_region_init_rom(boot_rom, NULL, "aspeed.boot_rom", - fl->size, &error_abort); + size, &error_abort); memory_region_add_subregion(get_system_memory(), FIRMWARE_ADDR, boot_rom); - write_boot_rom(drive0, FIRMWARE_ADDR, fl->size, &error_abort); + write_boot_rom(drive0, FIRMWARE_ADDR, size, &error_abort); } } @@ -430,6 +445,15 @@ static void aspeed_machine_init(MachineState *machine) arm_load_kernel(ARM_CPU(first_cpu), machine, &aspeed_board_binfo); } +static void at24c_eeprom_init(I2CBus *bus, uint8_t addr, uint32_t rsize) +{ + I2CSlave *i2c_dev = i2c_slave_new("at24c-eeprom", addr); + DeviceState *dev = DEVICE(i2c_dev); + + qdev_prop_set_uint32(dev, "rom-size", rsize); + i2c_slave_realize_and_unref(i2c_dev, bus, &error_abort); +} + static void palmetto_bmc_i2c_init(AspeedMachineState *bmc) { AspeedSoCState *soc = &bmc->soc; @@ -689,6 +713,34 @@ static void aspeed_eeprom_init(I2CBus *bus, uint8_t addr, uint32_t rsize) i2c_slave_realize_and_unref(i2c_dev, bus, &error_abort); } +static void fp5280g2_bmc_i2c_init(AspeedMachineState *bmc) +{ + AspeedSoCState *soc = &bmc->soc; + I2CSlave *i2c_mux; + + /* The at24c256 */ + at24c_eeprom_init(aspeed_i2c_get_bus(&soc->i2c, 1), 0x50, 32768); + + /* The fp5280g2 expects a TMP112 but a TMP105 is compatible */ + i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 2), TYPE_TMP105, + 0x48); + i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 2), TYPE_TMP105, + 0x49); + + i2c_mux = i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 2), + "pca9546", 0x70); + /* It expects a TMP112 but a TMP105 is compatible */ + i2c_slave_create_simple(pca954x_i2c_get_bus(i2c_mux, 0), TYPE_TMP105, + 0x4a); + + /* It expects a ds3232 but a ds1338 is good enough */ + i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 4), "ds1338", 0x68); + + /* It expects a pca9555 but a pca9552 is compatible */ + i2c_slave_create_simple(aspeed_i2c_get_bus(&soc->i2c, 8), TYPE_PCA9552, + 0x20); +} + static void rainier_bmc_i2c_init(AspeedMachineState *bmc) { AspeedSoCState *soc = &bmc->soc; @@ -1140,6 +1192,24 @@ static void aspeed_machine_g220a_class_init(ObjectClass *oc, void *data) aspeed_soc_num_cpus(amc->soc_name); }; +static void aspeed_machine_fp5280g2_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + AspeedMachineClass *amc = ASPEED_MACHINE_CLASS(oc); + + mc->desc = "Inspur FP5280G2 BMC (ARM1176)"; + amc->soc_name = "ast2500-a1"; + amc->hw_strap1 = FP5280G2_BMC_HW_STRAP1; + amc->fmc_model = "n25q512a"; + amc->spi_model = "mx25l25635e"; + amc->num_cs = 2; + amc->macs_mask = ASPEED_MAC0_ON | ASPEED_MAC1_ON; + amc->i2c_init = fp5280g2_bmc_i2c_init; + mc->default_ram_size = 512 * MiB; + mc->default_cpus = mc->min_cpus = mc->max_cpus = + aspeed_soc_num_cpus(amc->soc_name); +}; + static void aspeed_machine_rainier_class_init(ObjectClass *oc, void *data) { MachineClass *mc = MACHINE_CLASS(oc); @@ -1227,6 +1297,10 @@ static const TypeInfo aspeed_machine_types[] = { .name = MACHINE_TYPE_NAME("g220a-bmc"), .parent = TYPE_ASPEED_MACHINE, .class_init = aspeed_machine_g220a_class_init, + }, 
{ + .name = MACHINE_TYPE_NAME("fp5280g2-bmc"), + .parent = TYPE_ASPEED_MACHINE, + .class_init = aspeed_machine_fp5280g2_class_init, }, { .name = MACHINE_TYPE_NAME("quanta-q71l-bmc"), .parent = TYPE_ASPEED_MACHINE, diff --git a/hw/arm/aspeed_ast2600.c b/hw/arm/aspeed_ast2600.c index 9d70e8e060..0384357a95 100644 --- a/hw/arm/aspeed_ast2600.c +++ b/hw/arm/aspeed_ast2600.c @@ -148,6 +148,9 @@ static void aspeed_soc_ast2600_init(Object *obj) snprintf(typename, sizeof(typename), "aspeed.timer-%s", socname); object_initialize_child(obj, "timerctrl", &s->timerctrl, typename); + snprintf(typename, sizeof(typename), "aspeed.adc-%s", socname); + object_initialize_child(obj, "adc", &s->adc, typename); + snprintf(typename, sizeof(typename), "aspeed.i2c-%s", socname); object_initialize_child(obj, "i2c", &s->i2c, typename); @@ -322,6 +325,14 @@ static void aspeed_soc_ast2600_realize(DeviceState *dev, Error **errp) sysbus_connect_irq(SYS_BUS_DEVICE(&s->timerctrl), i, irq); } + /* ADC */ + if (!sysbus_realize(SYS_BUS_DEVICE(&s->adc), errp)) { + return; + } + sysbus_mmio_map(SYS_BUS_DEVICE(&s->adc), 0, sc->memmap[ASPEED_DEV_ADC]); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->adc), 0, + aspeed_soc_get_irq(s, ASPEED_DEV_ADC)); + /* UART - attach an 8250 to the IO space as our UART */ serial_mm_init(get_system_memory(), sc->memmap[s->uart_default], 2, aspeed_soc_get_irq(s, s->uart_default), 38400, @@ -337,11 +348,8 @@ static void aspeed_soc_ast2600_realize(DeviceState *dev, Error **errp) for (i = 0; i < ASPEED_I2C_GET_CLASS(&s->i2c)->num_busses; i++) { qemu_irq irq = qdev_get_gpio_in(DEVICE(&s->a7mpcore), sc->irqmap[ASPEED_DEV_I2C] + i); - /* - * The AST2600 SoC has one IRQ per I2C bus. Skip the common - * IRQ (AST2400 and AST2500) and connect all bussses. - */ - sysbus_connect_irq(SYS_BUS_DEVICE(&s->i2c), i + 1, irq); + /* The AST2600 I2C controller has one IRQ per bus. 
*/ + sysbus_connect_irq(SYS_BUS_DEVICE(&s->i2c.busses[i]), 0, irq); } /* FMC, The number of CS is set at the board level */ @@ -352,7 +360,7 @@ static void aspeed_soc_ast2600_realize(DeviceState *dev, Error **errp) } sysbus_mmio_map(SYS_BUS_DEVICE(&s->fmc), 0, sc->memmap[ASPEED_DEV_FMC]); sysbus_mmio_map(SYS_BUS_DEVICE(&s->fmc), 1, - s->fmc.ctrl->flash_window_base); + ASPEED_SMC_GET_CLASS(&s->fmc)->flash_window_base); sysbus_connect_irq(SYS_BUS_DEVICE(&s->fmc), 0, aspeed_soc_get_irq(s, ASPEED_DEV_FMC)); @@ -367,7 +375,7 @@ static void aspeed_soc_ast2600_realize(DeviceState *dev, Error **errp) sysbus_mmio_map(SYS_BUS_DEVICE(&s->spi[i]), 0, sc->memmap[ASPEED_DEV_SPI1 + i]); sysbus_mmio_map(SYS_BUS_DEVICE(&s->spi[i]), 1, - s->spi[i].ctrl->flash_window_base); + ASPEED_SMC_GET_CLASS(&s->spi[i])->flash_window_base); } /* EHCI */ diff --git a/hw/arm/aspeed_soc.c b/hw/arm/aspeed_soc.c index ed84502e23..7d53cf2f51 100644 --- a/hw/arm/aspeed_soc.c +++ b/hw/arm/aspeed_soc.c @@ -162,6 +162,9 @@ static void aspeed_soc_init(Object *obj) snprintf(typename, sizeof(typename), "aspeed.timer-%s", socname); object_initialize_child(obj, "timerctrl", &s->timerctrl, typename); + snprintf(typename, sizeof(typename), "aspeed.adc-%s", socname); + object_initialize_child(obj, "adc", &s->adc, typename); + snprintf(typename, sizeof(typename), "aspeed.i2c-%s", socname); object_initialize_child(obj, "i2c", &s->i2c, typename); @@ -287,6 +290,14 @@ static void aspeed_soc_realize(DeviceState *dev, Error **errp) sysbus_connect_irq(SYS_BUS_DEVICE(&s->timerctrl), i, irq); } + /* ADC */ + if (!sysbus_realize(SYS_BUS_DEVICE(&s->adc), errp)) { + return; + } + sysbus_mmio_map(SYS_BUS_DEVICE(&s->adc), 0, sc->memmap[ASPEED_DEV_ADC]); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->adc), 0, + aspeed_soc_get_irq(s, ASPEED_DEV_ADC)); + /* UART - attach an 8250 to the IO space as our UART */ serial_mm_init(get_system_memory(), sc->memmap[s->uart_default], 2, aspeed_soc_get_irq(s, s->uart_default), 38400, @@ -310,7 +321,7 @@ static void aspeed_soc_realize(DeviceState *dev, Error **errp) } sysbus_mmio_map(SYS_BUS_DEVICE(&s->fmc), 0, sc->memmap[ASPEED_DEV_FMC]); sysbus_mmio_map(SYS_BUS_DEVICE(&s->fmc), 1, - s->fmc.ctrl->flash_window_base); + ASPEED_SMC_GET_CLASS(&s->fmc)->flash_window_base); sysbus_connect_irq(SYS_BUS_DEVICE(&s->fmc), 0, aspeed_soc_get_irq(s, ASPEED_DEV_FMC)); @@ -323,7 +334,7 @@ static void aspeed_soc_realize(DeviceState *dev, Error **errp) sysbus_mmio_map(SYS_BUS_DEVICE(&s->spi[i]), 0, sc->memmap[ASPEED_DEV_SPI1 + i]); sysbus_mmio_map(SYS_BUS_DEVICE(&s->spi[i]), 1, - s->spi[i].ctrl->flash_window_base); + ASPEED_SMC_GET_CLASS(&s->spi[i])->flash_window_base); } /* EHCI */ diff --git a/hw/arm/boot.c b/hw/arm/boot.c index 57efb61ee4..74ad397b1f 100644 --- a/hw/arm/boot.c +++ b/hw/arm/boot.c @@ -599,10 +599,23 @@ int arm_load_dtb(hwaddr addr, const struct arm_boot_info *binfo, } g_strfreev(node_path); + /* + * We drop all the memory nodes which correspond to empty NUMA nodes + * from the device tree, because the Linux NUMA binding document + * states they should not be generated. Linux will get the NUMA node + * IDs of the empty NUMA nodes from the distance map if they are needed. + * This means QEMU users may be obliged to provide command lines which + * configure distance maps when the empty NUMA node IDs are needed and + * Linux's default distance map isn't sufficient. 
+ */ if (ms->numa_state != NULL && ms->numa_state->num_nodes > 0) { mem_base = binfo->loader_start; for (i = 0; i < ms->numa_state->num_nodes; i++) { mem_len = ms->numa_state->nodes[i].node_mem; + if (!mem_len) { + continue; + } + rc = fdt_add_memory_node(fdt, acells, mem_base, scells, mem_len, i); if (rc < 0) { diff --git a/hw/arm/npcm7xx.c b/hw/arm/npcm7xx.c index 2ab0080e0b..878c2208e0 100644 --- a/hw/arm/npcm7xx.c +++ b/hw/arm/npcm7xx.c @@ -63,6 +63,8 @@ #define NPCM7XX_ROM_BA (0xffff0000) #define NPCM7XX_ROM_SZ (64 * KiB) +/* SDHCI Modules */ +#define NPCM7XX_MMC_BA (0xf0842000) /* Clock configuration values to be fixed up when bypassing bootloader */ @@ -83,6 +85,7 @@ enum NPCM7xxInterrupt { NPCM7XX_UART3_IRQ, NPCM7XX_EMC1RX_IRQ = 15, NPCM7XX_EMC1TX_IRQ, + NPCM7XX_MMC_IRQ = 26, NPCM7XX_TIMER0_IRQ = 32, /* Timer Module 0 */ NPCM7XX_TIMER1_IRQ, NPCM7XX_TIMER2_IRQ, @@ -443,6 +446,8 @@ static void npcm7xx_init(Object *obj) for (i = 0; i < ARRAY_SIZE(s->emc); i++) { object_initialize_child(obj, "emc[*]", &s->emc[i], TYPE_NPCM7XX_EMC); } + + object_initialize_child(obj, "mmc", &s->mmc, TYPE_NPCM7XX_SDHCI); } static void npcm7xx_realize(DeviceState *dev, Error **errp) @@ -707,6 +712,12 @@ static void npcm7xx_realize(DeviceState *dev, Error **errp) &error_abort); memory_region_add_subregion(get_system_memory(), NPCM7XX_ROM_BA, &s->irom); + /* SDHCI */ + sysbus_realize(SYS_BUS_DEVICE(&s->mmc), &error_abort); + sysbus_mmio_map(SYS_BUS_DEVICE(&s->mmc), 0, NPCM7XX_MMC_BA); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->mmc), 0, + npcm7xx_irq(s, NPCM7XX_MMC_IRQ)); + create_unimplemented_device("npcm7xx.shm", 0xc0001000, 4 * KiB); create_unimplemented_device("npcm7xx.vdmx", 0xe0800000, 4 * KiB); create_unimplemented_device("npcm7xx.pcierc", 0xe1000000, 64 * KiB); @@ -736,7 +747,6 @@ static void npcm7xx_realize(DeviceState *dev, Error **errp) create_unimplemented_device("npcm7xx.usbd[8]", 0xf0838000, 4 * KiB); create_unimplemented_device("npcm7xx.usbd[9]", 0xf0839000, 4 * KiB); create_unimplemented_device("npcm7xx.sd", 0xf0840000, 8 * KiB); - create_unimplemented_device("npcm7xx.mmc", 0xf0842000, 8 * KiB); create_unimplemented_device("npcm7xx.pcimbx", 0xf0848000, 512 * KiB); create_unimplemented_device("npcm7xx.aes", 0xf0858000, 4 * KiB); create_unimplemented_device("npcm7xx.des", 0xf0859000, 4 * KiB); diff --git a/hw/arm/npcm7xx_boards.c b/hw/arm/npcm7xx_boards.c index a656169f61..dec7d16ae5 100644 --- a/hw/arm/npcm7xx_boards.c +++ b/hw/arm/npcm7xx_boards.c @@ -27,6 +27,9 @@ #include "qemu-common.h" #include "qemu/datadir.h" #include "qemu/units.h" +#include "sysemu/blockdev.h" +#include "sysemu/sysemu.h" +#include "sysemu/block-backend.h" #define NPCM750_EVB_POWER_ON_STRAPS 0x00001ff7 #define QUANTA_GSJ_POWER_ON_STRAPS 0x00001fff @@ -81,6 +84,22 @@ static void npcm7xx_connect_dram(NPCM7xxState *soc, MemoryRegion *dram) &error_abort); } +static void sdhci_attach_drive(SDHCIState *sdhci) +{ + DriveInfo *di = drive_get_next(IF_SD); + BlockBackend *blk = di ? 
blk_by_legacy_dinfo(di) : NULL; + + BusState *bus = qdev_get_child_bus(DEVICE(sdhci), "sd-bus"); + if (bus == NULL) { + error_report("No SD bus found in SOC object"); + exit(1); + } + + DeviceState *carddev = qdev_new(TYPE_SD_CARD); + qdev_prop_set_drive_err(carddev, "drive", blk, &error_fatal); + qdev_realize_and_unref(carddev, bus, &error_fatal); +} + static NPCM7xxState *npcm7xx_create_soc(MachineState *machine, uint32_t hw_straps) { @@ -355,6 +374,7 @@ static void quanta_gbs_init(MachineState *machine) drive_get(IF_MTD, 0, 0)); quanta_gbs_i2c_init(soc); + sdhci_attach_drive(&soc->mmc.sdhci); npcm7xx_load_kernel(machine, soc); } diff --git a/hw/arm/sabrelite.c b/hw/arm/sabrelite.c index 29fc777b61..553608e583 100644 --- a/hw/arm/sabrelite.c +++ b/hw/arm/sabrelite.c @@ -87,7 +87,7 @@ static void sabrelite_init(MachineState *machine) qdev_realize_and_unref(flash_dev, BUS(spi_bus), &error_fatal); cs_line = qdev_get_gpio_in_named(flash_dev, SSI_GPIO_CS, 0); - sysbus_connect_irq(SYS_BUS_DEVICE(spi_dev), 1, cs_line); + qdev_connect_gpio_out(DEVICE(&s->gpio[2]), 19, cs_line); } } } diff --git a/hw/arm/sbsa-ref.c b/hw/arm/sbsa-ref.c index 509c5f09b4..358714bd3e 100644 --- a/hw/arm/sbsa-ref.c +++ b/hw/arm/sbsa-ref.c @@ -670,7 +670,7 @@ static void sbsa_ref_init(MachineState *machine) int n, sbsa_max_cpus; if (!cpu_type_valid(machine->cpu_type)) { - error_report("mach-virt: CPU type %s not supported", machine->cpu_type); + error_report("sbsa-ref: CPU type %s not supported", machine->cpu_type); exit(1); } diff --git a/hw/arm/virt-acpi-build.c b/hw/arm/virt-acpi-build.c index 037cc1fd82..674f902652 100644 --- a/hw/arm/virt-acpi-build.c +++ b/hw/arm/virt-acpi-build.c @@ -240,6 +240,29 @@ static void acpi_dsdt_add_tpm(Aml *scope, VirtMachineState *vms) } #endif +#define ID_MAPPING_ENTRY_SIZE 20 +#define SMMU_V3_ENTRY_SIZE 68 +#define ROOT_COMPLEX_ENTRY_SIZE 36 +#define IORT_NODE_OFFSET 48 + +static void build_iort_id_mapping(GArray *table_data, uint32_t input_base, + uint32_t id_count, uint32_t out_ref) +{ + /* Table 4 ID mapping format */ + build_append_int_noprefix(table_data, input_base, 4); /* Input base */ + build_append_int_noprefix(table_data, id_count, 4); /* Number of IDs */ + build_append_int_noprefix(table_data, input_base, 4); /* Output base */ + build_append_int_noprefix(table_data, out_ref, 4); /* Output Reference */ + /* Flags */ + build_append_int_noprefix(table_data, 0 /* Single mapping (disabled) */, 4); +} + +struct AcpiIortIdMapping { + uint32_t input_base; + uint32_t id_count; +}; +typedef struct AcpiIortIdMapping AcpiIortIdMapping; + /* Build the iort ID mapping to SMMUv3 for a given PCI host bridge */ static int iort_host_bridges(Object *obj, void *opaque) @@ -273,20 +296,26 @@ static int iort_idmap_compare(gconstpointer a, gconstpointer b) return idmap_a->input_base - idmap_b->input_base; } +/* + * Input Output Remapping Table (IORT) + * Conforms to "IO Remapping Table System Software on ARM Platforms", + * Document number: ARM DEN 0049E.b, Feb 2021 + */ static void build_iort(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms) { - int i, nb_nodes, rc_mapping_count, iort_start = table_data->len; + int i, nb_nodes, rc_mapping_count; + const uint32_t iort_node_offset = IORT_NODE_OFFSET; + size_t node_size, smmu_offset = 0; AcpiIortIdMapping *idmap; - AcpiIortItsGroup *its; - AcpiIortTable *iort; - AcpiIortSmmu3 *smmu; - size_t node_size, iort_node_offset, iort_length, smmu_offset = 0; - AcpiIortRC *rc; + uint32_t id = 0; GArray *smmu_idmaps = g_array_new(false, true, 
sizeof(AcpiIortIdMapping)); GArray *its_idmaps = g_array_new(false, true, sizeof(AcpiIortIdMapping)); - iort = acpi_data_push(table_data, sizeof(*iort)); + AcpiTable table = { .sig = "IORT", .rev = 3, .oem_id = vms->oem_id, + .oem_table_id = vms->oem_table_id }; + /* Table 2 The IORT */ + acpi_table_begin(&table, table_data); if (vms->iommu == VIRT_IOMMU_SMMUV3) { AcpiIortIdMapping next_range = {0}; @@ -324,186 +353,202 @@ build_iort(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms) nb_nodes = 2; /* RC, ITS */ rc_mapping_count = 1; } + /* Number of IORT Nodes */ + build_append_int_noprefix(table_data, nb_nodes, 4); - iort_length = sizeof(*iort); - iort->node_count = cpu_to_le32(nb_nodes); - /* - * Use a copy in case table_data->data moves during acpi_data_push - * operations. - */ - iort_node_offset = sizeof(*iort); - iort->node_offset = cpu_to_le32(iort_node_offset); + /* Offset to Array of IORT Nodes */ + build_append_int_noprefix(table_data, IORT_NODE_OFFSET, 4); + build_append_int_noprefix(table_data, 0, 4); /* Reserved */ - /* ITS group node */ - node_size = sizeof(*its) + sizeof(uint32_t); - iort_length += node_size; - its = acpi_data_push(table_data, node_size); - - its->type = ACPI_IORT_NODE_ITS_GROUP; - its->length = cpu_to_le16(node_size); - its->its_count = cpu_to_le32(1); - its->identifiers[0] = 0; /* MADT translation_id */ + /* Table 12 ITS Group Format */ + build_append_int_noprefix(table_data, 0 /* ITS Group */, 1); /* Type */ + node_size = 20 /* fixed header size */ + 4 /* 1 GIC ITS Identifier */; + build_append_int_noprefix(table_data, node_size, 2); /* Length */ + build_append_int_noprefix(table_data, 1, 1); /* Revision */ + build_append_int_noprefix(table_data, id++, 4); /* Identifier */ + build_append_int_noprefix(table_data, 0, 4); /* Number of ID mappings */ + build_append_int_noprefix(table_data, 0, 4); /* Reference to ID Array */ + build_append_int_noprefix(table_data, 1, 4); /* Number of ITSs */ + /* GIC ITS Identifier Array */ + build_append_int_noprefix(table_data, 0 /* MADT translation_id */, 4); if (vms->iommu == VIRT_IOMMU_SMMUV3) { int irq = vms->irqmap[VIRT_SMMU] + ARM_SPI_BASE; - /* SMMUv3 node */ - smmu_offset = iort_node_offset + node_size; - node_size = sizeof(*smmu) + sizeof(*idmap); - iort_length += node_size; - smmu = acpi_data_push(table_data, node_size); + smmu_offset = table_data->len - table.table_offset; + /* Table 9 SMMUv3 Format */ + build_append_int_noprefix(table_data, 4 /* SMMUv3 */, 1); /* Type */ + node_size = SMMU_V3_ENTRY_SIZE + ID_MAPPING_ENTRY_SIZE; + build_append_int_noprefix(table_data, node_size, 2); /* Length */ + build_append_int_noprefix(table_data, 4, 1); /* Revision */ + build_append_int_noprefix(table_data, id++, 4); /* Identifier */ + build_append_int_noprefix(table_data, 1, 4); /* Number of ID mappings */ + /* Reference to ID Array */ + build_append_int_noprefix(table_data, SMMU_V3_ENTRY_SIZE, 4); + /* Base address */ + build_append_int_noprefix(table_data, vms->memmap[VIRT_SMMU].base, 8); + /* Flags */ + build_append_int_noprefix(table_data, 1 /* COHACC Override */, 4); + build_append_int_noprefix(table_data, 0, 4); /* Reserved */ + build_append_int_noprefix(table_data, 0, 8); /* VATOS address */ + /* Model */ + build_append_int_noprefix(table_data, 0 /* Generic SMMU-v3 */, 4); + build_append_int_noprefix(table_data, irq, 4); /* Event */ + build_append_int_noprefix(table_data, irq + 1, 4); /* PRI */ + build_append_int_noprefix(table_data, irq + 3, 4); /* GERR */ + build_append_int_noprefix(table_data, irq + 2, 
4); /* Sync */ + build_append_int_noprefix(table_data, 0, 4); /* Proximity domain */ + /* DeviceID mapping index (ignored since interrupts are GSIV based) */ + build_append_int_noprefix(table_data, 0, 4); - smmu->type = ACPI_IORT_NODE_SMMU_V3; - smmu->length = cpu_to_le16(node_size); - smmu->mapping_count = cpu_to_le32(1); - smmu->mapping_offset = cpu_to_le32(sizeof(*smmu)); - smmu->base_address = cpu_to_le64(vms->memmap[VIRT_SMMU].base); - smmu->flags = cpu_to_le32(ACPI_IORT_SMMU_V3_COHACC_OVERRIDE); - smmu->event_gsiv = cpu_to_le32(irq); - smmu->pri_gsiv = cpu_to_le32(irq + 1); - smmu->sync_gsiv = cpu_to_le32(irq + 2); - smmu->gerr_gsiv = cpu_to_le32(irq + 3); - - /* Identity RID mapping covering the whole input RID range */ - idmap = &smmu->id_mapping_array[0]; - idmap->input_base = 0; - idmap->id_count = cpu_to_le32(0xFFFF); - idmap->output_base = 0; /* output IORT node is the ITS group node (the first node) */ - idmap->output_reference = cpu_to_le32(iort_node_offset); + build_iort_id_mapping(table_data, 0, 0xFFFF, IORT_NODE_OFFSET); } - /* Root Complex Node */ - node_size = sizeof(*rc) + sizeof(*idmap) * rc_mapping_count; - iort_length += node_size; - rc = acpi_data_push(table_data, node_size); + /* Table 17 Root Complex Node */ + build_append_int_noprefix(table_data, 2 /* Root complex */, 1); /* Type */ + node_size = ROOT_COMPLEX_ENTRY_SIZE + + ID_MAPPING_ENTRY_SIZE * rc_mapping_count; + build_append_int_noprefix(table_data, node_size, 2); /* Length */ + build_append_int_noprefix(table_data, 3, 1); /* Revision */ + build_append_int_noprefix(table_data, id++, 4); /* Identifier */ + /* Number of ID mappings */ + build_append_int_noprefix(table_data, rc_mapping_count, 4); + /* Reference to ID Array */ + build_append_int_noprefix(table_data, ROOT_COMPLEX_ENTRY_SIZE, 4); - rc->type = ACPI_IORT_NODE_PCI_ROOT_COMPLEX; - rc->length = cpu_to_le16(node_size); - rc->mapping_count = cpu_to_le32(rc_mapping_count); - rc->mapping_offset = cpu_to_le32(sizeof(*rc)); + /* Table 14 Memory access properties */ + /* CCA: Cache Coherent Attribute */ + build_append_int_noprefix(table_data, 1 /* fully coherent */, 4); + build_append_int_noprefix(table_data, 0, 1); /* AH: Note Allocation Hints */ + build_append_int_noprefix(table_data, 0, 2); /* Reserved */ + /* Table 15 Memory Access Flags */ + build_append_int_noprefix(table_data, 0x3 /* CCA = CPM = DACS = 1 */, 1); - /* fully coherent device */ - rc->memory_properties.cache_coherency = cpu_to_le32(1); - rc->memory_properties.memory_flags = 0x3; /* CCA = CPM = DCAS = 1 */ - rc->pci_segment_number = 0; /* MCFG pci_segment */ + build_append_int_noprefix(table_data, 0, 4); /* ATS Attribute */ + /* MCFG pci_segment */ + build_append_int_noprefix(table_data, 0, 4); /* PCI Segment number */ + /* Memory address size limit */ + build_append_int_noprefix(table_data, 64, 1); + + build_append_int_noprefix(table_data, 0, 3); /* Reserved */ + + /* Output Reference */ if (vms->iommu == VIRT_IOMMU_SMMUV3) { AcpiIortIdMapping *range; /* translated RIDs connect to SMMUv3 node: RC -> SMMUv3 -> ITS */ for (i = 0; i < smmu_idmaps->len; i++) { - idmap = &rc->id_mapping_array[i]; range = &g_array_index(smmu_idmaps, AcpiIortIdMapping, i); - - idmap->input_base = cpu_to_le32(range->input_base); - idmap->id_count = cpu_to_le32(range->id_count); - idmap->output_base = cpu_to_le32(range->input_base); /* output IORT node is the smmuv3 node */ - idmap->output_reference = cpu_to_le32(smmu_offset); + build_iort_id_mapping(table_data, range->input_base, + range->id_count, smmu_offset); } 
/* bypassed RIDs connect to ITS group node directly: RC -> ITS */ for (i = 0; i < its_idmaps->len; i++) { - idmap = &rc->id_mapping_array[smmu_idmaps->len + i]; range = &g_array_index(its_idmaps, AcpiIortIdMapping, i); - - idmap->input_base = cpu_to_le32(range->input_base); - idmap->id_count = cpu_to_le32(range->id_count); - idmap->output_base = cpu_to_le32(range->input_base); /* output IORT node is the ITS group node (the first node) */ - idmap->output_reference = cpu_to_le32(iort_node_offset); + build_iort_id_mapping(table_data, range->input_base, + range->id_count, iort_node_offset); } } else { - /* Identity RID mapping covering the whole input RID range */ - idmap = &rc->id_mapping_array[0]; - idmap->input_base = cpu_to_le32(0); - idmap->id_count = cpu_to_le32(0xFFFF); - idmap->output_base = cpu_to_le32(0); /* output IORT node is the ITS group node (the first node) */ - idmap->output_reference = cpu_to_le32(iort_node_offset); + build_iort_id_mapping(table_data, 0, 0xFFFF, IORT_NODE_OFFSET); } + acpi_table_end(linker, &table); g_array_free(smmu_idmaps, true); g_array_free(its_idmaps, true); - - /* - * Update the pointer address in case table_data->data moves during above - * acpi_data_push operations. - */ - iort = (AcpiIortTable *)(table_data->data + iort_start); - iort->length = cpu_to_le32(iort_length); - - build_header(linker, table_data, (void *)(table_data->data + iort_start), - "IORT", table_data->len - iort_start, 0, vms->oem_id, - vms->oem_table_id); } +/* + * Serial Port Console Redirection Table (SPCR) + * Rev: 1.07 + */ static void build_spcr(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms) { - AcpiSerialPortConsoleRedirection *spcr; - const MemMapEntry *uart_memmap = &vms->memmap[VIRT_UART]; - int irq = vms->irqmap[VIRT_UART] + ARM_SPI_BASE; - int spcr_start = table_data->len; + AcpiTable table = { .sig = "SPCR", .rev = 2, .oem_id = vms->oem_id, + .oem_table_id = vms->oem_table_id }; - spcr = acpi_data_push(table_data, sizeof(*spcr)); + acpi_table_begin(&table, table_data); - spcr->interface_type = 0x3; /* ARM PL011 UART */ + /* Interface Type */ + build_append_int_noprefix(table_data, 3, 1); /* ARM PL011 UART */ + build_append_int_noprefix(table_data, 0, 3); /* Reserved */ + /* Base Address */ + build_append_gas(table_data, AML_AS_SYSTEM_MEMORY, 8, 0, 1, + vms->memmap[VIRT_UART].base); + /* Interrupt Type */ + build_append_int_noprefix(table_data, + (1 << 3) /* Bit[3] ARMH GIC interrupt */, 1); + build_append_int_noprefix(table_data, 0, 1); /* IRQ */ + /* Global System Interrupt */ + build_append_int_noprefix(table_data, + vms->irqmap[VIRT_UART] + ARM_SPI_BASE, 4); + build_append_int_noprefix(table_data, 3 /* 9600 */, 1); /* Baud Rate */ + build_append_int_noprefix(table_data, 0 /* No Parity */, 1); /* Parity */ + /* Stop Bits */ + build_append_int_noprefix(table_data, 1 /* 1 Stop bit */, 1); + /* Flow Control */ + build_append_int_noprefix(table_data, + (1 << 1) /* RTS/CTS hardware flow control */, 1); + /* Terminal Type */ + build_append_int_noprefix(table_data, 0 /* VT100 */, 1); + build_append_int_noprefix(table_data, 0, 1); /* Language */ + /* PCI Device ID */ + build_append_int_noprefix(table_data, 0xffff /* not a PCI device*/, 2); + /* PCI Vendor ID */ + build_append_int_noprefix(table_data, 0xffff /* not a PCI device*/, 2); + build_append_int_noprefix(table_data, 0, 1); /* PCI Bus Number */ + build_append_int_noprefix(table_data, 0, 1); /* PCI Device Number */ + build_append_int_noprefix(table_data, 0, 1); /* PCI Function Number */ + 
build_append_int_noprefix(table_data, 0, 4); /* PCI Flags */ + build_append_int_noprefix(table_data, 0, 1); /* PCI Segment */ + build_append_int_noprefix(table_data, 0, 4); /* Reserved */ - spcr->base_address.space_id = AML_SYSTEM_MEMORY; - spcr->base_address.bit_width = 8; - spcr->base_address.bit_offset = 0; - spcr->base_address.access_width = 1; - spcr->base_address.address = cpu_to_le64(uart_memmap->base); - - spcr->interrupt_types = (1 << 3); /* Bit[3] ARMH GIC interrupt */ - spcr->gsi = cpu_to_le32(irq); /* Global System Interrupt */ - - spcr->baud = 3; /* Baud Rate: 3 = 9600 */ - spcr->parity = 0; /* No Parity */ - spcr->stopbits = 1; /* 1 Stop bit */ - spcr->flowctrl = (1 << 1); /* Bit[1] = RTS/CTS hardware flow control */ - spcr->term_type = 0; /* Terminal Type: 0 = VT100 */ - - spcr->pci_device_id = 0xffff; /* PCI Device ID: not a PCI device */ - spcr->pci_vendor_id = 0xffff; /* PCI Vendor ID: not a PCI device */ - - build_header(linker, table_data, (void *)(table_data->data + spcr_start), - "SPCR", table_data->len - spcr_start, 2, vms->oem_id, - vms->oem_table_id); + acpi_table_end(linker, &table); } +/* + * ACPI spec, Revision 5.1 + * 5.2.16 System Resource Affinity Table (SRAT) + */ static void build_srat(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms) { - AcpiSystemResourceAffinityTable *srat; - AcpiSratProcessorGiccAffinity *core; - AcpiSratMemoryAffinity *numamem; - int i, srat_start; + int i; uint64_t mem_base; MachineClass *mc = MACHINE_GET_CLASS(vms); MachineState *ms = MACHINE(vms); const CPUArchIdList *cpu_list = mc->possible_cpu_arch_ids(ms); + AcpiTable table = { .sig = "SRAT", .rev = 3, .oem_id = vms->oem_id, + .oem_table_id = vms->oem_table_id }; - srat_start = table_data->len; - srat = acpi_data_push(table_data, sizeof(*srat)); - srat->reserved1 = cpu_to_le32(1); + acpi_table_begin(&table, table_data); + build_append_int_noprefix(table_data, 1, 4); /* Reserved */ + build_append_int_noprefix(table_data, 0, 8); /* Reserved */ for (i = 0; i < cpu_list->len; ++i) { - core = acpi_data_push(table_data, sizeof(*core)); - core->type = ACPI_SRAT_PROCESSOR_GICC; - core->length = sizeof(*core); - core->proximity = cpu_to_le32(cpu_list->cpus[i].props.node_id); - core->acpi_processor_uid = cpu_to_le32(i); - core->flags = cpu_to_le32(1); + uint32_t nodeid = cpu_list->cpus[i].props.node_id; + /* + * 5.2.16.4 GICC Affinity Structure + */ + build_append_int_noprefix(table_data, 3, 1); /* Type */ + build_append_int_noprefix(table_data, 18, 1); /* Length */ + build_append_int_noprefix(table_data, nodeid, 4); /* Proximity Domain */ + build_append_int_noprefix(table_data, i, 4); /* ACPI Processor UID */ + /* Flags, Table 5-76 */ + build_append_int_noprefix(table_data, 1 /* Enabled */, 4); + build_append_int_noprefix(table_data, 0, 4); /* Clock Domain */ } mem_base = vms->memmap[VIRT_MEM].base; for (i = 0; i < ms->numa_state->num_nodes; ++i) { if (ms->numa_state->nodes[i].node_mem > 0) { - numamem = acpi_data_push(table_data, sizeof(*numamem)); - build_srat_memory(numamem, mem_base, + build_srat_memory(table_data, mem_base, ms->numa_state->nodes[i].node_mem, i, MEM_AFFINITY_ENABLED); mem_base += ms->numa_state->nodes[i].node_mem; @@ -515,142 +560,251 @@ build_srat(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms) } if (ms->device_memory) { - numamem = acpi_data_push(table_data, sizeof *numamem); - build_srat_memory(numamem, ms->device_memory->base, + build_srat_memory(table_data, ms->device_memory->base, memory_region_size(&ms->device_memory->mr), 
ms->numa_state->num_nodes - 1, MEM_AFFINITY_HOTPLUGGABLE | MEM_AFFINITY_ENABLED); } - build_header(linker, table_data, (void *)(table_data->data + srat_start), - "SRAT", table_data->len - srat_start, 3, vms->oem_id, - vms->oem_table_id); + acpi_table_end(linker, &table); } -/* GTDT */ +/* + * ACPI spec, Revision 5.1 + * 5.2.24 Generic Timer Description Table (GTDT) + */ static void build_gtdt(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms) { VirtMachineClass *vmc = VIRT_MACHINE_GET_CLASS(vms); - int gtdt_start = table_data->len; - AcpiGenericTimerTable *gtdt; - uint32_t irqflags; + /* + * Table 5-117 Flag Definitions + * set only "Timer interrupt Mode" and assume "Timer Interrupt + * polarity" bit as '0: Interrupt is Active high' + */ + uint32_t irqflags = vmc->claim_edge_triggered_timers ? + 1 : /* Interrupt is Edge triggered */ + 0; /* Interrupt is Level triggered */ + AcpiTable table = { .sig = "GTDT", .rev = 2, .oem_id = vms->oem_id, + .oem_table_id = vms->oem_table_id }; - if (vmc->claim_edge_triggered_timers) { - irqflags = ACPI_GTDT_INTERRUPT_MODE_EDGE; - } else { - irqflags = ACPI_GTDT_INTERRUPT_MODE_LEVEL; - } + acpi_table_begin(&table, table_data); - gtdt = acpi_data_push(table_data, sizeof *gtdt); - /* The interrupt values are the same with the device tree when adding 16 */ - gtdt->secure_el1_interrupt = cpu_to_le32(ARCH_TIMER_S_EL1_IRQ + 16); - gtdt->secure_el1_flags = cpu_to_le32(irqflags); + /* CntControlBase Physical Address */ + /* FIXME: invalid value, should be 0xFFFFFFFFFFFFFFFF if not impl. ? */ + build_append_int_noprefix(table_data, 0, 8); + build_append_int_noprefix(table_data, 0, 4); /* Reserved */ + /* + * FIXME: clarify comment: + * The interrupt values are the same with the device tree when adding 16 + */ + /* Secure EL1 timer GSIV */ + build_append_int_noprefix(table_data, ARCH_TIMER_S_EL1_IRQ + 16, 4); + /* Secure EL1 timer Flags */ + build_append_int_noprefix(table_data, irqflags, 4); + /* Non-Secure EL1 timer GSIV */ + build_append_int_noprefix(table_data, ARCH_TIMER_NS_EL1_IRQ + 16, 4); + /* Non-Secure EL1 timer Flags */ + build_append_int_noprefix(table_data, irqflags | + 1UL << 2, /* Always-on Capability */ + 4); + /* Virtual timer GSIV */ + build_append_int_noprefix(table_data, ARCH_TIMER_VIRT_IRQ + 16, 4); + /* Virtual Timer Flags */ + build_append_int_noprefix(table_data, irqflags, 4); + /* Non-Secure EL2 timer GSIV */ + build_append_int_noprefix(table_data, ARCH_TIMER_NS_EL2_IRQ + 16, 4); + /* Non-Secure EL2 timer Flags */ + build_append_int_noprefix(table_data, irqflags, 4); + /* CntReadBase Physical address */ + build_append_int_noprefix(table_data, 0, 8); + /* Platform Timer Count */ + build_append_int_noprefix(table_data, 0, 4); + /* Platform Timer Offset */ + build_append_int_noprefix(table_data, 0, 4); - gtdt->non_secure_el1_interrupt = cpu_to_le32(ARCH_TIMER_NS_EL1_IRQ + 16); - gtdt->non_secure_el1_flags = cpu_to_le32(irqflags | - ACPI_GTDT_CAP_ALWAYS_ON); - - gtdt->virtual_timer_interrupt = cpu_to_le32(ARCH_TIMER_VIRT_IRQ + 16); - gtdt->virtual_timer_flags = cpu_to_le32(irqflags); - - gtdt->non_secure_el2_interrupt = cpu_to_le32(ARCH_TIMER_NS_EL2_IRQ + 16); - gtdt->non_secure_el2_flags = cpu_to_le32(irqflags); - - build_header(linker, table_data, - (void *)(table_data->data + gtdt_start), "GTDT", - table_data->len - gtdt_start, 2, vms->oem_id, - vms->oem_table_id); + acpi_table_end(linker, &table); +} + +/* Debug Port Table 2 (DBG2) */ +static void +build_dbg2(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms) +{ + 
AcpiTable table = { .sig = "DBG2", .rev = 0, .oem_id = vms->oem_id, + .oem_table_id = vms->oem_table_id }; + int dbg2devicelength; + const char name[] = "COM0"; + const int namespace_length = sizeof(name); + + acpi_table_begin(&table, table_data); + + dbg2devicelength = 22 + /* BaseAddressRegister[] offset */ + 12 + /* BaseAddressRegister[] */ + 4 + /* AddressSize[] */ + namespace_length /* NamespaceString[] */; + + /* OffsetDbgDeviceInfo */ + build_append_int_noprefix(table_data, 44, 4); + /* NumberDbgDeviceInfo */ + build_append_int_noprefix(table_data, 1, 4); + + /* Table 2. Debug Device Information structure format */ + build_append_int_noprefix(table_data, 0, 1); /* Revision */ + build_append_int_noprefix(table_data, dbg2devicelength, 2); /* Length */ + /* NumberofGenericAddressRegisters */ + build_append_int_noprefix(table_data, 1, 1); + /* NameSpaceStringLength */ + build_append_int_noprefix(table_data, namespace_length, 2); + build_append_int_noprefix(table_data, 38, 2); /* NameSpaceStringOffset */ + build_append_int_noprefix(table_data, 0, 2); /* OemDataLength */ + /* OemDataOffset (0 means no OEM data) */ + build_append_int_noprefix(table_data, 0, 2); + + /* Port Type */ + build_append_int_noprefix(table_data, 0x8000 /* Serial */, 2); + /* Port Subtype */ + build_append_int_noprefix(table_data, 0x3 /* ARM PL011 UART */, 2); + build_append_int_noprefix(table_data, 0, 2); /* Reserved */ + /* BaseAddressRegisterOffset */ + build_append_int_noprefix(table_data, 22, 2); + /* AddressSizeOffset */ + build_append_int_noprefix(table_data, 34, 2); + + /* BaseAddressRegister[] */ + build_append_gas(table_data, AML_AS_SYSTEM_MEMORY, 8, 0, 1, + vms->memmap[VIRT_UART].base); + + /* AddressSize[] */ + build_append_int_noprefix(table_data, + vms->memmap[VIRT_UART].size, 4); + + /* NamespaceString[] */ + g_array_append_vals(table_data, name, namespace_length); + + acpi_table_end(linker, &table); +}; + +/* + * ACPI spec, Revision 5.1 Errata A + * 5.2.12 Multiple APIC Description Table (MADT) + */ +static void build_append_gicr(GArray *table_data, uint64_t base, uint32_t size) +{ + build_append_int_noprefix(table_data, 0xE, 1); /* Type */ + build_append_int_noprefix(table_data, 16, 1); /* Length */ + build_append_int_noprefix(table_data, 0, 2); /* Reserved */ + /* Discovery Range Base Addres */ + build_append_int_noprefix(table_data, base, 8); + build_append_int_noprefix(table_data, size, 4); /* Discovery Range Length */ } -/* MADT */ static void build_madt(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms) { - VirtMachineClass *vmc = VIRT_MACHINE_GET_CLASS(vms); - int madt_start = table_data->len; - const MemMapEntry *memmap = vms->memmap; - const int *irqmap = vms->irqmap; - AcpiMadtGenericDistributor *gicd; - AcpiMadtGenericMsiFrame *gic_msi; int i; + VirtMachineClass *vmc = VIRT_MACHINE_GET_CLASS(vms); + const MemMapEntry *memmap = vms->memmap; + AcpiTable table = { .sig = "APIC", .rev = 3, .oem_id = vms->oem_id, + .oem_table_id = vms->oem_table_id }; - acpi_data_push(table_data, sizeof(AcpiMultipleApicTable)); + acpi_table_begin(&table, table_data); + /* Local Interrupt Controller Address */ + build_append_int_noprefix(table_data, 0, 4); + build_append_int_noprefix(table_data, 0, 4); /* Flags */ - gicd = acpi_data_push(table_data, sizeof *gicd); - gicd->type = ACPI_APIC_GENERIC_DISTRIBUTOR; - gicd->length = sizeof(*gicd); - gicd->base_address = cpu_to_le64(memmap[VIRT_GIC_DIST].base); - gicd->version = vms->gic_version; + /* 5.2.12.15 GIC Distributor Structure */ + 
build_append_int_noprefix(table_data, 0xC, 1); /* Type */ + build_append_int_noprefix(table_data, 24, 1); /* Length */ + build_append_int_noprefix(table_data, 0, 2); /* Reserved */ + build_append_int_noprefix(table_data, 0, 4); /* GIC ID */ + /* Physical Base Address */ + build_append_int_noprefix(table_data, memmap[VIRT_GIC_DIST].base, 8); + build_append_int_noprefix(table_data, 0, 4); /* System Vector Base */ + /* GIC version */ + build_append_int_noprefix(table_data, vms->gic_version, 1); + build_append_int_noprefix(table_data, 0, 3); /* Reserved */ for (i = 0; i < MACHINE(vms)->smp.cpus; i++) { - AcpiMadtGenericCpuInterface *gicc = acpi_data_push(table_data, - sizeof(*gicc)); ARMCPU *armcpu = ARM_CPU(qemu_get_cpu(i)); + uint64_t physical_base_address = 0, gich = 0, gicv = 0; + uint32_t vgic_interrupt = vms->virt ? PPI(ARCH_GIC_MAINT_IRQ) : 0; + uint32_t pmu_interrupt = arm_feature(&armcpu->env, ARM_FEATURE_PMU) ? + PPI(VIRTUAL_PMU_IRQ) : 0; - gicc->type = ACPI_APIC_GENERIC_CPU_INTERFACE; - gicc->length = sizeof(*gicc); if (vms->gic_version == 2) { - gicc->base_address = cpu_to_le64(memmap[VIRT_GIC_CPU].base); - gicc->gich_base_address = cpu_to_le64(memmap[VIRT_GIC_HYP].base); - gicc->gicv_base_address = cpu_to_le64(memmap[VIRT_GIC_VCPU].base); + physical_base_address = memmap[VIRT_GIC_CPU].base; + gicv = memmap[VIRT_GIC_VCPU].base; + gich = memmap[VIRT_GIC_HYP].base; } - gicc->cpu_interface_number = cpu_to_le32(i); - gicc->arm_mpidr = cpu_to_le64(armcpu->mp_affinity); - gicc->uid = cpu_to_le32(i); - gicc->flags = cpu_to_le32(ACPI_MADT_GICC_ENABLED); - if (arm_feature(&armcpu->env, ARM_FEATURE_PMU)) { - gicc->performance_interrupt = cpu_to_le32(PPI(VIRTUAL_PMU_IRQ)); - } - if (vms->virt) { - gicc->vgic_interrupt = cpu_to_le32(PPI(ARCH_GIC_MAINT_IRQ)); - } + /* 5.2.12.14 GIC Structure */ + build_append_int_noprefix(table_data, 0xB, 1); /* Type */ + build_append_int_noprefix(table_data, 76, 1); /* Length */ + build_append_int_noprefix(table_data, 0, 2); /* Reserved */ + build_append_int_noprefix(table_data, i, 4); /* GIC ID */ + build_append_int_noprefix(table_data, i, 4); /* ACPI Processor UID */ + /* Flags */ + build_append_int_noprefix(table_data, 1, 4); /* Enabled */ + /* Parking Protocol Version */ + build_append_int_noprefix(table_data, 0, 4); + /* Performance Interrupt GSIV */ + build_append_int_noprefix(table_data, pmu_interrupt, 4); + build_append_int_noprefix(table_data, 0, 8); /* Parked Address */ + /* Physical Base Address */ + build_append_int_noprefix(table_data, physical_base_address, 8); + build_append_int_noprefix(table_data, gicv, 8); /* GICV */ + build_append_int_noprefix(table_data, gich, 8); /* GICH */ + /* VGIC Maintenance interrupt */ + build_append_int_noprefix(table_data, vgic_interrupt, 4); + build_append_int_noprefix(table_data, 0, 8); /* GICR Base Address*/ + /* MPIDR */ + build_append_int_noprefix(table_data, armcpu->mp_affinity, 8); } if (vms->gic_version == 3) { - AcpiMadtGenericTranslator *gic_its; - int nb_redist_regions = virt_gicv3_redist_region_count(vms); - AcpiMadtGenericRedistributor *gicr = acpi_data_push(table_data, - sizeof *gicr); - - gicr->type = ACPI_APIC_GENERIC_REDISTRIBUTOR; - gicr->length = sizeof(*gicr); - gicr->base_address = cpu_to_le64(memmap[VIRT_GIC_REDIST].base); - gicr->range_length = cpu_to_le32(memmap[VIRT_GIC_REDIST].size); - - if (nb_redist_regions == 2) { - gicr = acpi_data_push(table_data, sizeof(*gicr)); - gicr->type = ACPI_APIC_GENERIC_REDISTRIBUTOR; - gicr->length = sizeof(*gicr); - gicr->base_address = - 
cpu_to_le64(memmap[VIRT_HIGH_GIC_REDIST2].base); - gicr->range_length = - cpu_to_le32(memmap[VIRT_HIGH_GIC_REDIST2].size); + build_append_gicr(table_data, memmap[VIRT_GIC_REDIST].base, + memmap[VIRT_GIC_REDIST].size); + if (virt_gicv3_redist_region_count(vms) == 2) { + build_append_gicr(table_data, memmap[VIRT_HIGH_GIC_REDIST2].base, + memmap[VIRT_HIGH_GIC_REDIST2].size); } if (its_class_name() && !vmc->no_its) { - gic_its = acpi_data_push(table_data, sizeof *gic_its); - gic_its->type = ACPI_APIC_GENERIC_TRANSLATOR; - gic_its->length = sizeof(*gic_its); - gic_its->translation_id = 0; - gic_its->base_address = cpu_to_le64(memmap[VIRT_GIC_ITS].base); + /* + * FIXME: Structure is from Revision 6.0 where 'GIC Structure' + * has additional fields on top of implemented 5.1 Errata A, + * to make it consistent with v6.0 we need to bump everything + * to v6.0 + */ + /* + * ACPI spec, Revision 6.0 Errata A + * (original 6.0 definition has invalid Length) + * 5.2.12.18 GIC ITS Structure + */ + build_append_int_noprefix(table_data, 0xF, 1); /* Type */ + build_append_int_noprefix(table_data, 20, 1); /* Length */ + build_append_int_noprefix(table_data, 0, 2); /* Reserved */ + build_append_int_noprefix(table_data, 0, 4); /* GIC ITS ID */ + /* Physical Base Address */ + build_append_int_noprefix(table_data, memmap[VIRT_GIC_ITS].base, 8); + build_append_int_noprefix(table_data, 0, 4); /* Reserved */ } } else { - gic_msi = acpi_data_push(table_data, sizeof *gic_msi); - gic_msi->type = ACPI_APIC_GENERIC_MSI_FRAME; - gic_msi->length = sizeof(*gic_msi); - gic_msi->gic_msi_frame_id = 0; - gic_msi->base_address = cpu_to_le64(memmap[VIRT_GIC_V2M].base); - gic_msi->flags = cpu_to_le32(1); - gic_msi->spi_count = cpu_to_le16(NUM_GICV2M_SPIS); - gic_msi->spi_base = cpu_to_le16(irqmap[VIRT_GIC_V2M] + ARM_SPI_BASE); - } + const uint16_t spi_base = vms->irqmap[VIRT_GIC_V2M] + ARM_SPI_BASE; - build_header(linker, table_data, - (void *)(table_data->data + madt_start), "APIC", - table_data->len - madt_start, 3, vms->oem_id, - vms->oem_table_id); + /* 5.2.12.16 GIC MSI Frame Structure */ + build_append_int_noprefix(table_data, 0xD, 1); /* Type */ + build_append_int_noprefix(table_data, 24, 1); /* Length */ + build_append_int_noprefix(table_data, 0, 2); /* Reserved */ + build_append_int_noprefix(table_data, 0, 4); /* GIC MSI Frame ID */ + /* Physical Base Address */ + build_append_int_noprefix(table_data, memmap[VIRT_GIC_V2M].base, 8); + build_append_int_noprefix(table_data, 1, 4); /* Flags */ + /* SPI Count */ + build_append_int_noprefix(table_data, NUM_GICV2M_SPIS, 2); + build_append_int_noprefix(table_data, spi_base, 2); /* SPI Base */ + } + acpi_table_end(linker, &table); } /* FADT */ @@ -692,10 +846,11 @@ build_dsdt(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms) MachineState *ms = MACHINE(vms); const MemMapEntry *memmap = vms->memmap; const int *irqmap = vms->irqmap; + AcpiTable table = { .sig = "DSDT", .rev = 2, .oem_id = vms->oem_id, + .oem_table_id = vms->oem_table_id }; + acpi_table_begin(&table, table_data); dsdt = init_aml_allocator(); - /* Reserve space for header */ - acpi_data_push(dsdt->buf, sizeof(AcpiTableHeader)); /* When booting the VM with UEFI, UEFI takes ownership of the RTC hardware. 
* While UEFI can use libfdt to disable the RTC device node in the DTB that @@ -742,12 +897,10 @@ build_dsdt(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms) aml_append(dsdt, scope); - /* copy AML table into ACPI tables blob and patch header there */ + /* copy AML table into ACPI tables blob */ g_array_append_vals(table_data, dsdt->buf->data, dsdt->buf->len); - build_header(linker, table_data, - (void *)(table_data->data + table_data->len - dsdt->buf->len), - "DSDT", dsdt->buf->len, 2, vms->oem_id, - vms->oem_table_id); + + acpi_table_end(linker, &table); free_aml_allocator(); } @@ -790,13 +943,19 @@ void virt_acpi_build(VirtMachineState *vms, AcpiBuildTables *tables) dsdt = tables_blob->len; build_dsdt(tables_blob, tables->linker, vms); - /* FADT MADT GTDT MCFG SPCR pointed to by RSDT */ + /* FADT MADT PPTT GTDT MCFG SPCR DBG2 pointed to by RSDT */ acpi_add_table(table_offsets, tables_blob); build_fadt_rev5(tables_blob, tables->linker, vms, dsdt); acpi_add_table(table_offsets, tables_blob); build_madt(tables_blob, tables->linker, vms); + if (!vmc->no_cpu_topology) { + acpi_add_table(table_offsets, tables_blob); + build_pptt(tables_blob, tables->linker, ms, + vms->oem_id, vms->oem_table_id); + } + acpi_add_table(table_offsets, tables_blob); build_gtdt(tables_blob, tables->linker, vms); @@ -813,6 +972,9 @@ void virt_acpi_build(VirtMachineState *vms, AcpiBuildTables *tables) acpi_add_table(table_offsets, tables_blob); build_spcr(tables_blob, tables->linker, vms); + acpi_add_table(table_offsets, tables_blob); + build_dbg2(tables_blob, tables->linker, vms); + if (vms->ras) { build_ghes_error_table(tables->hardware_errors, tables->linker); acpi_add_table(table_offsets, tables_blob); diff --git a/hw/arm/virt.c b/hw/arm/virt.c index 1d59f0e59f..369552ad45 100644 --- a/hw/arm/virt.c +++ b/hw/arm/virt.c @@ -351,20 +351,21 @@ static void fdt_add_cpu_nodes(const VirtMachineState *vms) int cpu; int addr_cells = 1; const MachineState *ms = MACHINE(vms); + const VirtMachineClass *vmc = VIRT_MACHINE_GET_CLASS(vms); int smp_cpus = ms->smp.cpus; /* - * From Documentation/devicetree/bindings/arm/cpus.txt - * On ARM v8 64-bit systems value should be set to 2, - * that corresponds to the MPIDR_EL1 register size. - * If MPIDR_EL1[63:32] value is equal to 0 on all CPUs - * in the system, #address-cells can be set to 1, since - * MPIDR_EL1[63:32] bits are not used for CPUs - * identification. + * See Linux Documentation/devicetree/bindings/arm/cpus.yaml + * On ARM v8 64-bit systems value should be set to 2, + * that corresponds to the MPIDR_EL1 register size. + * If MPIDR_EL1[63:32] value is equal to 0 on all CPUs + * in the system, #address-cells can be set to 1, since + * MPIDR_EL1[63:32] bits are not used for CPUs + * identification. * - * Here we actually don't know whether our system is 32- or 64-bit one. - * The simplest way to go is to examine affinity IDs of all our CPUs. If - * at least one of them has Aff3 populated, we set #address-cells to 2. + * Here we actually don't know whether our system is 32- or 64-bit one. + * The simplest way to go is to examine affinity IDs of all our CPUs. If + * at least one of them has Aff3 populated, we set #address-cells to 2. 
*/ for (cpu = 0; cpu < smp_cpus; cpu++) { ARMCPU *armcpu = ARM_CPU(qemu_get_cpu(cpu)); @@ -407,8 +408,57 @@ static void fdt_add_cpu_nodes(const VirtMachineState *vms) ms->possible_cpus->cpus[cs->cpu_index].props.node_id); } + if (!vmc->no_cpu_topology) { + qemu_fdt_setprop_cell(ms->fdt, nodename, "phandle", + qemu_fdt_alloc_phandle(ms->fdt)); + } + g_free(nodename); } + + if (!vmc->no_cpu_topology) { + /* + * Add vCPU topology description through fdt node cpu-map. + * + * See Linux Documentation/devicetree/bindings/cpu/cpu-topology.txt + * In a SMP system, the hierarchy of CPUs can be defined through + * four entities that are used to describe the layout of CPUs in + * the system: socket/cluster/core/thread. + * + * A socket node represents the boundary of system physical package + * and its child nodes must be one or more cluster nodes. A system + * can contain several layers of clustering within a single physical + * package and cluster nodes can be contained in parent cluster nodes. + * + * Given that cluster is not yet supported in the vCPU topology, + * we currently generate one cluster node within each socket node + * by default. + */ + qemu_fdt_add_subnode(ms->fdt, "/cpus/cpu-map"); + + for (cpu = smp_cpus - 1; cpu >= 0; cpu--) { + char *cpu_path = g_strdup_printf("/cpus/cpu@%d", cpu); + char *map_path; + + if (ms->smp.threads > 1) { + map_path = g_strdup_printf( + "/cpus/cpu-map/socket%d/cluster0/core%d/thread%d", + cpu / (ms->smp.cores * ms->smp.threads), + (cpu / ms->smp.threads) % ms->smp.cores, + cpu % ms->smp.threads); + } else { + map_path = g_strdup_printf( + "/cpus/cpu-map/socket%d/cluster0/core%d", + cpu / ms->smp.cores, + cpu % ms->smp.cores); + } + qemu_fdt_add_path(ms->fdt, map_path); + qemu_fdt_setprop_phandle(ms->fdt, map_path, "cpu", cpu_path); + + g_free(map_path); + g_free(cpu_path); + } + } } static void fdt_add_its_gic_node(VirtMachineState *vms) @@ -1459,7 +1509,7 @@ static void create_platform_bus(VirtMachineState *vms) MemoryRegion *sysmem = get_system_memory(); dev = qdev_new(TYPE_PLATFORM_BUS_DEVICE); - dev->id = TYPE_PLATFORM_BUS_DEVICE; + dev->id = g_strdup(TYPE_PLATFORM_BUS_DEVICE); qdev_prop_set_uint32(dev, "num_irqs", PLATFORM_BUS_NUM_IRQS); qdev_prop_set_uint32(dev, "mmio_size", vms->memmap[VIRT_PLATFORM_BUS].size); sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); @@ -2687,10 +2737,10 @@ static void virt_machine_class_init(ObjectClass *oc, void *data) "Set the IOMMU type. 
" "Valid values are none and smmuv3"); - object_class_property_add_bool(oc, "default_bus_bypass_iommu", + object_class_property_add_bool(oc, "default-bus-bypass-iommu", virt_get_default_bus_bypass_iommu, virt_set_default_bus_bypass_iommu); - object_class_property_set_description(oc, "default_bus_bypass_iommu", + object_class_property_set_description(oc, "default-bus-bypass-iommu", "Set on/off to enable/disable " "bypass_iommu for default root bus"); @@ -2815,6 +2865,8 @@ static void virt_machine_6_1_options(MachineClass *mc) virt_machine_6_2_options(mc); compat_props_add(mc->compat_props, hw_compat_6_1, hw_compat_6_1_len); + mc->smp_props.prefer_sockets = true; + vmc->no_cpu_topology = true; /* qemu ITS was introduced with 6.2 */ vmc->no_tcg_its = true; diff --git a/hw/arm/xlnx-versal-virt.c b/hw/arm/xlnx-versal-virt.c index 5bca360dce..d2f55e29b6 100644 --- a/hw/arm/xlnx-versal-virt.c +++ b/hw/arm/xlnx-versal-virt.c @@ -356,6 +356,61 @@ static void fdt_add_rtc_node(VersalVirt *s) g_free(name); } +static void fdt_add_bbram_node(VersalVirt *s) +{ + const char compat[] = TYPE_XLNX_BBRAM; + const char interrupt_names[] = "bbram-error"; + char *name = g_strdup_printf("/bbram@%x", MM_PMC_BBRAM_CTRL); + + qemu_fdt_add_subnode(s->fdt, name); + + qemu_fdt_setprop_cells(s->fdt, name, "interrupts", + GIC_FDT_IRQ_TYPE_SPI, VERSAL_BBRAM_APB_IRQ_0, + GIC_FDT_IRQ_FLAGS_LEVEL_HI); + qemu_fdt_setprop(s->fdt, name, "interrupt-names", + interrupt_names, sizeof(interrupt_names)); + qemu_fdt_setprop_sized_cells(s->fdt, name, "reg", + 2, MM_PMC_BBRAM_CTRL, + 2, MM_PMC_BBRAM_CTRL_SIZE); + qemu_fdt_setprop(s->fdt, name, "compatible", compat, sizeof(compat)); + g_free(name); +} + +static void fdt_add_efuse_ctrl_node(VersalVirt *s) +{ + const char compat[] = TYPE_XLNX_VERSAL_EFUSE_CTRL; + const char interrupt_names[] = "pmc_efuse"; + char *name = g_strdup_printf("/pmc_efuse@%x", MM_PMC_EFUSE_CTRL); + + qemu_fdt_add_subnode(s->fdt, name); + + qemu_fdt_setprop_cells(s->fdt, name, "interrupts", + GIC_FDT_IRQ_TYPE_SPI, VERSAL_EFUSE_IRQ, + GIC_FDT_IRQ_FLAGS_LEVEL_HI); + qemu_fdt_setprop(s->fdt, name, "interrupt-names", + interrupt_names, sizeof(interrupt_names)); + qemu_fdt_setprop_sized_cells(s->fdt, name, "reg", + 2, MM_PMC_EFUSE_CTRL, + 2, MM_PMC_EFUSE_CTRL_SIZE); + qemu_fdt_setprop(s->fdt, name, "compatible", compat, sizeof(compat)); + g_free(name); +} + +static void fdt_add_efuse_cache_node(VersalVirt *s) +{ + const char compat[] = TYPE_XLNX_VERSAL_EFUSE_CACHE; + char *name = g_strdup_printf("/xlnx_pmc_efuse_cache@%x", + MM_PMC_EFUSE_CACHE); + + qemu_fdt_add_subnode(s->fdt, name); + + qemu_fdt_setprop_sized_cells(s->fdt, name, "reg", + 2, MM_PMC_EFUSE_CACHE, + 2, MM_PMC_EFUSE_CACHE_SIZE); + qemu_fdt_setprop(s->fdt, name, "compatible", compat, sizeof(compat)); + g_free(name); +} + static void fdt_nop_memory_nodes(void *fdt, Error **errp) { Error *err = NULL; @@ -510,6 +565,30 @@ static void create_virtio_regions(VersalVirt *s) } } +static void bbram_attach_drive(XlnxBBRam *dev) +{ + DriveInfo *dinfo; + BlockBackend *blk; + + dinfo = drive_get_by_index(IF_PFLASH, 0); + blk = dinfo ? blk_by_legacy_dinfo(dinfo) : NULL; + if (blk) { + qdev_prop_set_drive(DEVICE(dev), "drive", blk); + } +} + +static void efuse_attach_drive(XlnxEFuse *dev) +{ + DriveInfo *dinfo; + BlockBackend *blk; + + dinfo = drive_get_by_index(IF_PFLASH, 1); + blk = dinfo ? 
blk_by_legacy_dinfo(dinfo) : NULL; + if (blk) { + qdev_prop_set_drive(DEVICE(dev), "drive", blk); + } +} + static void sd_plugin_card(SDHCIState *sd, DriveInfo *di) { BlockBackend *blk = di ? blk_by_legacy_dinfo(di) : NULL; @@ -570,6 +649,9 @@ static void versal_virt_init(MachineState *machine) fdt_add_usb_xhci_nodes(s); fdt_add_sd_nodes(s); fdt_add_rtc_node(s); + fdt_add_bbram_node(s); + fdt_add_efuse_ctrl_node(s); + fdt_add_efuse_cache_node(s); fdt_add_cpu_nodes(s, psci_conduit); fdt_add_clk_node(s, "/clk125", 125000000, s->phandle.clk_125Mhz); fdt_add_clk_node(s, "/clk25", 25000000, s->phandle.clk_25Mhz); @@ -579,6 +661,12 @@ static void versal_virt_init(MachineState *machine) memory_region_add_subregion_overlap(get_system_memory(), 0, &s->soc.fpd.apu.mr, 0); + /* Attach bbram backend, if given */ + bbram_attach_drive(&s->soc.pmc.bbram); + + /* Attach efuse backend, if given */ + efuse_attach_drive(&s->soc.pmc.efuse); + /* Plugin SD cards. */ for (i = 0; i < ARRAY_SIZE(s->soc.pmc.iou.sd); i++) { sd_plugin_card(&s->soc.pmc.iou.sd[i], drive_get_next(IF_SD)); diff --git a/hw/arm/xlnx-versal.c b/hw/arm/xlnx-versal.c index 547a26603a..b2705b6925 100644 --- a/hw/arm/xlnx-versal.c +++ b/hw/arm/xlnx-versal.c @@ -314,6 +314,61 @@ static void versal_create_xrams(Versal *s, qemu_irq *pic) } } +static void versal_create_bbram(Versal *s, qemu_irq *pic) +{ + SysBusDevice *sbd; + + object_initialize_child_with_props(OBJECT(s), "bbram", &s->pmc.bbram, + sizeof(s->pmc.bbram), TYPE_XLNX_BBRAM, + &error_fatal, + "crc-zpads", "0", + NULL); + sbd = SYS_BUS_DEVICE(&s->pmc.bbram); + + sysbus_realize(sbd, &error_fatal); + memory_region_add_subregion(&s->mr_ps, MM_PMC_BBRAM_CTRL, + sysbus_mmio_get_region(sbd, 0)); + sysbus_connect_irq(sbd, 0, pic[VERSAL_BBRAM_APB_IRQ_0]); +} + +static void versal_realize_efuse_part(Versal *s, Object *dev, hwaddr base) +{ + SysBusDevice *part = SYS_BUS_DEVICE(dev); + + object_property_set_link(OBJECT(part), "efuse", + OBJECT(&s->pmc.efuse), &error_abort); + + sysbus_realize(part, &error_abort); + memory_region_add_subregion(&s->mr_ps, base, + sysbus_mmio_get_region(part, 0)); +} + +static void versal_create_efuse(Versal *s, qemu_irq *pic) +{ + Object *bits = OBJECT(&s->pmc.efuse); + Object *ctrl = OBJECT(&s->pmc.efuse_ctrl); + Object *cache = OBJECT(&s->pmc.efuse_cache); + + object_initialize_child(OBJECT(s), "efuse-ctrl", &s->pmc.efuse_ctrl, + TYPE_XLNX_VERSAL_EFUSE_CTRL); + + object_initialize_child(OBJECT(s), "efuse-cache", &s->pmc.efuse_cache, + TYPE_XLNX_VERSAL_EFUSE_CACHE); + + object_initialize_child_with_props(ctrl, "xlnx-efuse@0", bits, + sizeof(s->pmc.efuse), + TYPE_XLNX_EFUSE, &error_abort, + "efuse-nr", "3", + "efuse-size", "8192", + NULL); + + qdev_realize(DEVICE(bits), NULL, &error_abort); + versal_realize_efuse_part(s, ctrl, MM_PMC_EFUSE_CTRL); + versal_realize_efuse_part(s, cache, MM_PMC_EFUSE_CACHE); + + sysbus_connect_irq(SYS_BUS_DEVICE(ctrl), 0, pic[VERSAL_EFUSE_IRQ]); +} + /* This takes the board allocated linear DDR memory and creates aliases * for each split DDR range/aperture on the Versal address map. 
*/ @@ -402,6 +457,8 @@ static void versal_realize(DeviceState *dev, Error **errp) versal_create_sds(s, pic); versal_create_rtc(s, pic); versal_create_xrams(s, pic); + versal_create_bbram(s, pic); + versal_create_efuse(s, pic); versal_map_ddr(s); versal_unimp(s); diff --git a/hw/arm/xlnx-zcu102.c b/hw/arm/xlnx-zcu102.c index 6c6cb02e86..3dc2b5e8ca 100644 --- a/hw/arm/xlnx-zcu102.c +++ b/hw/arm/xlnx-zcu102.c @@ -98,6 +98,30 @@ static void zcu102_modify_dtb(const struct arm_boot_info *binfo, void *fdt) } } +static void bbram_attach_drive(XlnxBBRam *dev) +{ + DriveInfo *dinfo; + BlockBackend *blk; + + dinfo = drive_get_by_index(IF_PFLASH, 2); + blk = dinfo ? blk_by_legacy_dinfo(dinfo) : NULL; + if (blk) { + qdev_prop_set_drive(DEVICE(dev), "drive", blk); + } +} + +static void efuse_attach_drive(XlnxEFuse *dev) +{ + DriveInfo *dinfo; + BlockBackend *blk; + + dinfo = drive_get_by_index(IF_PFLASH, 3); + blk = dinfo ? blk_by_legacy_dinfo(dinfo) : NULL; + if (blk) { + qdev_prop_set_drive(DEVICE(dev), "drive", blk); + } +} + static void xlnx_zcu102_init(MachineState *machine) { XlnxZCU102 *s = ZCU102_MACHINE(machine); @@ -136,6 +160,12 @@ static void xlnx_zcu102_init(MachineState *machine) qdev_realize(DEVICE(&s->soc), NULL, &error_fatal); + /* Attach bbram backend, if given */ + bbram_attach_drive(&s->soc.bbram); + + /* Attach efuse backend, if given */ + efuse_attach_drive(&s->soc.efuse); + /* Create and plug in the SD cards */ for (i = 0; i < XLNX_ZYNQMP_NUM_SDHCI; i++) { BusState *bus; diff --git a/hw/arm/xlnx-zynqmp.c b/hw/arm/xlnx-zynqmp.c index 4e5a471e30..1c52a575aa 100644 --- a/hw/arm/xlnx-zynqmp.c +++ b/hw/arm/xlnx-zynqmp.c @@ -66,6 +66,12 @@ #define RTC_ADDR 0xffa60000 #define RTC_IRQ 26 +#define BBRAM_ADDR 0xffcd0000 +#define BBRAM_IRQ 11 + +#define EFUSE_ADDR 0xffcc0000 +#define EFUSE_IRQ 87 + #define SDHCI_CAPABILITIES 0x280737ec6481 /* Datasheet: UG1085 (v1.7) */ static const uint64_t gem_addr[XLNX_ZYNQMP_NUM_GEMS] = { @@ -226,6 +232,47 @@ static void xlnx_zynqmp_create_rpu(MachineState *ms, XlnxZynqMPState *s, qdev_realize(DEVICE(&s->rpu_cluster), NULL, &error_fatal); } +static void xlnx_zynqmp_create_bbram(XlnxZynqMPState *s, qemu_irq *gic) +{ + SysBusDevice *sbd; + + object_initialize_child_with_props(OBJECT(s), "bbram", &s->bbram, + sizeof(s->bbram), TYPE_XLNX_BBRAM, + &error_fatal, + "crc-zpads", "1", + NULL); + sbd = SYS_BUS_DEVICE(&s->bbram); + + sysbus_realize(sbd, &error_fatal); + sysbus_mmio_map(sbd, 0, BBRAM_ADDR); + sysbus_connect_irq(sbd, 0, gic[BBRAM_IRQ]); +} + +static void xlnx_zynqmp_create_efuse(XlnxZynqMPState *s, qemu_irq *gic) +{ + Object *bits = OBJECT(&s->efuse); + Object *ctrl = OBJECT(&s->efuse_ctrl); + SysBusDevice *sbd; + + object_initialize_child(OBJECT(s), "efuse-ctrl", &s->efuse_ctrl, + TYPE_XLNX_ZYNQMP_EFUSE); + + object_initialize_child_with_props(ctrl, "xlnx-efuse@0", bits, + sizeof(s->efuse), + TYPE_XLNX_EFUSE, &error_abort, + "efuse-nr", "3", + "efuse-size", "2048", + NULL); + + qdev_realize(DEVICE(bits), NULL, &error_abort); + object_property_set_link(ctrl, "efuse", bits, &error_abort); + + sbd = SYS_BUS_DEVICE(ctrl); + sysbus_realize(sbd, &error_abort); + sysbus_mmio_map(sbd, 0, EFUSE_ADDR); + sysbus_connect_irq(sbd, 0, gic[EFUSE_IRQ]); +} + static void xlnx_zynqmp_create_unimp_mmio(XlnxZynqMPState *s) { static const struct UnimpInfo { @@ -626,6 +673,8 @@ static void xlnx_zynqmp_realize(DeviceState *dev, Error **errp) sysbus_mmio_map(SYS_BUS_DEVICE(&s->rtc), 0, RTC_ADDR); sysbus_connect_irq(SYS_BUS_DEVICE(&s->rtc), 0, gic_spi[RTC_IRQ]); + 
xlnx_zynqmp_create_bbram(s, gic_spi); + xlnx_zynqmp_create_efuse(s, gic_spi); xlnx_zynqmp_create_unimp_mmio(s); for (i = 0; i < XLNX_ZYNQMP_NUM_GDMA_CH; i++) { diff --git a/hw/audio/intel-hda.c b/hw/audio/intel-hda.c index 4330213fff..8ce9df64e3 100644 --- a/hw/audio/intel-hda.c +++ b/hw/audio/intel-hda.c @@ -52,7 +52,7 @@ void hda_codec_bus_init(DeviceState *dev, HDACodecBus *bus, size_t bus_size, hda_codec_response_func response, hda_codec_xfer_func xfer) { - qbus_create_inplace(bus, bus_size, TYPE_HDA_BUS, dev, NULL); + qbus_init(bus, bus_size, TYPE_HDA_BUS, dev, NULL); bus->response = response; bus->xfer = xfer; } diff --git a/hw/block/fdc.c b/hw/block/fdc.c index 9014cd30b3..fa933cd326 100644 --- a/hw/block/fdc.c +++ b/hw/block/fdc.c @@ -77,7 +77,7 @@ static const TypeInfo floppy_bus_info = { static void floppy_bus_create(FDCtrl *fdc, FloppyBus *bus, DeviceState *dev) { - qbus_create_inplace(bus, sizeof(FloppyBus), TYPE_FLOPPY_BUS, dev, NULL); + qbus_init(bus, sizeof(FloppyBus), TYPE_FLOPPY_BUS, dev, NULL); bus->fdc = fdc; } diff --git a/hw/block/swim.c b/hw/block/swim.c index 509c2f4900..333da08ce0 100644 --- a/hw/block/swim.c +++ b/hw/block/swim.c @@ -421,8 +421,7 @@ static void sysbus_swim_realize(DeviceState *dev, Error **errp) Swim *sys = SWIM(dev); SWIMCtrl *swimctrl = &sys->ctrl; - qbus_create_inplace(&swimctrl->bus, sizeof(SWIMBus), TYPE_SWIM_BUS, dev, - NULL); + qbus_init(&swimctrl->bus, sizeof(SWIMBus), TYPE_SWIM_BUS, dev, NULL); swimctrl->bus.ctrl = swimctrl; } diff --git a/hw/char/goldfish_tty.c b/hw/char/goldfish_tty.c index 8365a18761..20b77885c1 100644 --- a/hw/char/goldfish_tty.c +++ b/hw/char/goldfish_tty.c @@ -1,5 +1,5 @@ /* - * SPDX-License-Identifer: GPL-2.0-or-later + * SPDX-License-Identifier: GPL-2.0-or-later * * Goldfish TTY * diff --git a/hw/char/ibex_uart.c b/hw/char/ibex_uart.c index 9b0a817713..e58181fcf4 100644 --- a/hw/char/ibex_uart.c +++ b/hw/char/ibex_uart.c @@ -550,6 +550,7 @@ static void ibex_uart_class_init(ObjectClass *klass, void *data) dc->realize = ibex_uart_realize; dc->vmsd = &vmstate_ibex_uart; device_class_set_props(dc, ibex_uart_properties); + set_bit(DEVICE_CATEGORY_INPUT, dc->categories); } static const TypeInfo ibex_uart_info = { diff --git a/hw/char/mchp_pfsoc_mmuart.c b/hw/char/mchp_pfsoc_mmuart.c index 2facf85c2d..22f3e78eb9 100644 --- a/hw/char/mchp_pfsoc_mmuart.c +++ b/hw/char/mchp_pfsoc_mmuart.c @@ -22,20 +22,25 @@ #include "qemu/osdep.h" #include "qemu/log.h" -#include "chardev/char.h" +#include "qapi/error.h" +#include "migration/vmstate.h" #include "hw/char/mchp_pfsoc_mmuart.h" +#include "hw/qdev-properties.h" + +#define REGS_OFFSET 0x20 static uint64_t mchp_pfsoc_mmuart_read(void *opaque, hwaddr addr, unsigned size) { MchpPfSoCMMUartState *s = opaque; - if (addr >= MCHP_PFSOC_MMUART_REG_SIZE) { + addr >>= 2; + if (addr >= MCHP_PFSOC_MMUART_REG_COUNT) { qemu_log_mask(LOG_GUEST_ERROR, "%s: read: addr=0x%" HWADDR_PRIx "\n", - __func__, addr); + __func__, addr << 2); return 0; } - return s->reg[addr / sizeof(uint32_t)]; + return s->reg[addr]; } static void mchp_pfsoc_mmuart_write(void *opaque, hwaddr addr, @@ -44,13 +49,14 @@ static void mchp_pfsoc_mmuart_write(void *opaque, hwaddr addr, MchpPfSoCMMUartState *s = opaque; uint32_t val32 = (uint32_t)value; - if (addr >= MCHP_PFSOC_MMUART_REG_SIZE) { + addr >>= 2; + if (addr >= MCHP_PFSOC_MMUART_REG_COUNT) { qemu_log_mask(LOG_GUEST_ERROR, "%s: bad write: addr=0x%" HWADDR_PRIx - " v=0x%x\n", __func__, addr, val32); + " v=0x%x\n", __func__, addr << 2, val32); return; } - s->reg[addr / 
sizeof(uint32_t)] = val32; + s->reg[addr] = val32; } static const MemoryRegionOps mchp_pfsoc_mmuart_ops = { @@ -63,23 +69,95 @@ static const MemoryRegionOps mchp_pfsoc_mmuart_ops = { }, }; -MchpPfSoCMMUartState *mchp_pfsoc_mmuart_create(MemoryRegion *sysmem, - hwaddr base, qemu_irq irq, Chardev *chr) +static void mchp_pfsoc_mmuart_reset(DeviceState *dev) { - MchpPfSoCMMUartState *s; + MchpPfSoCMMUartState *s = MCHP_PFSOC_UART(dev); - s = g_new0(MchpPfSoCMMUartState, 1); - - memory_region_init_io(&s->iomem, NULL, &mchp_pfsoc_mmuart_ops, s, - "mchp.pfsoc.mmuart", 0x1000); - - s->base = base; - s->irq = irq; - - s->serial = serial_mm_init(sysmem, base, 2, irq, 399193, chr, - DEVICE_LITTLE_ENDIAN); - - memory_region_add_subregion(sysmem, base + 0x20, &s->iomem); - - return s; + memset(s->reg, 0, sizeof(s->reg)); + device_cold_reset(DEVICE(&s->serial_mm)); +} + +static void mchp_pfsoc_mmuart_init(Object *obj) +{ + MchpPfSoCMMUartState *s = MCHP_PFSOC_UART(obj); + + object_initialize_child(obj, "serial-mm", &s->serial_mm, TYPE_SERIAL_MM); + object_property_add_alias(obj, "chardev", OBJECT(&s->serial_mm), "chardev"); +} + +static void mchp_pfsoc_mmuart_realize(DeviceState *dev, Error **errp) +{ + MchpPfSoCMMUartState *s = MCHP_PFSOC_UART(dev); + + qdev_prop_set_uint8(DEVICE(&s->serial_mm), "regshift", 2); + qdev_prop_set_uint32(DEVICE(&s->serial_mm), "baudbase", 399193); + qdev_prop_set_uint8(DEVICE(&s->serial_mm), "endianness", + DEVICE_LITTLE_ENDIAN); + if (!sysbus_realize(SYS_BUS_DEVICE(&s->serial_mm), errp)) { + return; + } + + sysbus_pass_irq(SYS_BUS_DEVICE(dev), SYS_BUS_DEVICE(&s->serial_mm)); + + memory_region_init(&s->container, OBJECT(s), "mchp.pfsoc.mmuart", 0x1000); + sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->container); + + memory_region_add_subregion(&s->container, 0, + sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->serial_mm), 0)); + + memory_region_init_io(&s->iomem, OBJECT(s), &mchp_pfsoc_mmuart_ops, s, + "mchp.pfsoc.mmuart.regs", 0x1000 - REGS_OFFSET); + memory_region_add_subregion(&s->container, REGS_OFFSET, &s->iomem); +} + +static const VMStateDescription mchp_pfsoc_mmuart_vmstate = { + .name = "mchp.pfsoc.uart", + .version_id = 0, + .minimum_version_id = 0, + .fields = (VMStateField[]) { + VMSTATE_UINT32_ARRAY(reg, MchpPfSoCMMUartState, + MCHP_PFSOC_MMUART_REG_COUNT), + VMSTATE_END_OF_LIST() + } +}; + +static void mchp_pfsoc_mmuart_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + + dc->realize = mchp_pfsoc_mmuart_realize; + dc->reset = mchp_pfsoc_mmuart_reset; + dc->vmsd = &mchp_pfsoc_mmuart_vmstate; + set_bit(DEVICE_CATEGORY_INPUT, dc->categories); +} + +static const TypeInfo mchp_pfsoc_mmuart_info = { + .name = TYPE_MCHP_PFSOC_UART, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(MchpPfSoCMMUartState), + .instance_init = mchp_pfsoc_mmuart_init, + .class_init = mchp_pfsoc_mmuart_class_init, +}; + +static void mchp_pfsoc_mmuart_register_types(void) +{ + type_register_static(&mchp_pfsoc_mmuart_info); +} + +type_init(mchp_pfsoc_mmuart_register_types) + +MchpPfSoCMMUartState *mchp_pfsoc_mmuart_create(MemoryRegion *sysmem, + hwaddr base, + qemu_irq irq, Chardev *chr) +{ + DeviceState *dev = qdev_new(TYPE_MCHP_PFSOC_UART); + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + + qdev_prop_set_chr(dev, "chardev", chr); + sysbus_realize(sbd, &error_fatal); + + memory_region_add_subregion(sysmem, base, sysbus_mmio_get_region(sbd, 0)); + sysbus_connect_irq(sbd, 0, irq); + + return MCHP_PFSOC_UART(dev); } diff --git a/hw/char/sh_serial.c 
b/hw/char/sh_serial.c index 167f4d8cb9..355886ee3a 100644 --- a/hw/char/sh_serial.c +++ b/hw/char/sh_serial.c @@ -26,13 +26,17 @@ */ #include "qemu/osdep.h" +#include "hw/sysbus.h" #include "hw/irq.h" +#include "hw/qdev-core.h" +#include "hw/qdev-properties.h" +#include "hw/qdev-properties-system.h" #include "hw/sh4/sh.h" #include "chardev/char-fe.h" #include "qapi/error.h" #include "qemu/timer.h" - -//#define DEBUG_SERIAL +#include "qemu/log.h" +#include "trace.h" #define SH_SERIAL_FLAG_TEND (1 << 0) #define SH_SERIAL_FLAG_TDE (1 << 1) @@ -42,10 +46,10 @@ #define SH_RX_FIFO_LENGTH (16) -typedef struct { - MemoryRegion iomem; - MemoryRegion iomem_p4; - MemoryRegion iomem_a7; +OBJECT_DECLARE_SIMPLE_TYPE(SHSerialState, SH_SERIAL) + +struct SHSerialState { + SysBusDevice parent; uint8_t smr; uint8_t brr; uint8_t scr; @@ -59,13 +63,12 @@ typedef struct { uint8_t rx_tail; uint8_t rx_head; - int freq; - int feat; + uint8_t feat; int flags; int rtrg; CharBackend chr; - QEMUTimer *fifo_timeout_timer; + QEMUTimer fifo_timeout_timer; uint64_t etu; /* Elementary Time Unit (ns) */ qemu_irq eri; @@ -73,9 +76,13 @@ typedef struct { qemu_irq txi; qemu_irq tei; qemu_irq bri; -} sh_serial_state; +}; -static void sh_serial_clear_fifo(sh_serial_state * s) +typedef struct {} SHSerialStateClass; + +OBJECT_DEFINE_TYPE(SHSerialState, sh_serial, SH_SERIAL, SYS_BUS_DEVICE) + +static void sh_serial_clear_fifo(SHSerialState *s) { memset(s->rx_fifo, 0, SH_RX_FIFO_LENGTH); s->rx_cnt = 0; @@ -86,14 +93,12 @@ static void sh_serial_clear_fifo(sh_serial_state * s) static void sh_serial_write(void *opaque, hwaddr offs, uint64_t val, unsigned size) { - sh_serial_state *s = opaque; + SHSerialState *s = opaque; + DeviceState *d = DEVICE(s); unsigned char ch; -#ifdef DEBUG_SERIAL - printf("sh_serial: write offs=0x%02x val=0x%02x\n", - offs, val); -#endif - switch(offs) { + trace_sh_serial_write(d->id, size, offs, val); + switch (offs) { case 0x00: /* SMR */ s->smr = val & ((s->feat & SH_SERIAL_FEAT_SCIF) ? 0x7b : 0xff); return; @@ -103,8 +108,9 @@ static void sh_serial_write(void *opaque, hwaddr offs, case 0x08: /* SCR */ /* TODO : For SH7751, SCIF mask should be 0xfb. */ s->scr = val & ((s->feat & SH_SERIAL_FEAT_SCIF) ? 0xfa : 0xff); - if (!(val & (1 << 5))) + if (!(val & (1 << 5))) { s->flags |= SH_SERIAL_FLAG_TEND; + } if ((s->feat & SH_SERIAL_FEAT_SCIF) && s->txi) { qemu_set_irq(s->txi, val & (1 << 7)); } @@ -115,8 +121,10 @@ static void sh_serial_write(void *opaque, hwaddr offs, case 0x0c: /* FTDR / TDR */ if (qemu_chr_fe_backend_connected(&s->chr)) { ch = val; - /* XXX this blocks entire thread. Rewrite to use - * qemu_chr_fe_write and background I/O callbacks */ + /* + * XXX this blocks entire thread. 
Rewrite to use + * qemu_chr_fe_write and background I/O callbacks + */ qemu_chr_fe_write_all(&s->chr, &ch, 1); } s->dr = val; @@ -129,18 +137,23 @@ static void sh_serial_write(void *opaque, hwaddr offs, #endif } if (s->feat & SH_SERIAL_FEAT_SCIF) { - switch(offs) { + switch (offs) { case 0x10: /* FSR */ - if (!(val & (1 << 6))) + if (!(val & (1 << 6))) { s->flags &= ~SH_SERIAL_FLAG_TEND; - if (!(val & (1 << 5))) + } + if (!(val & (1 << 5))) { s->flags &= ~SH_SERIAL_FLAG_TDE; - if (!(val & (1 << 4))) + } + if (!(val & (1 << 4))) { s->flags &= ~SH_SERIAL_FLAG_BRK; - if (!(val & (1 << 1))) + } + if (!(val & (1 << 1))) { s->flags &= ~SH_SERIAL_FLAG_RDF; - if (!(val & (1 << 0))) + } + if (!(val & (1 << 0))) { s->flags &= ~SH_SERIAL_FLAG_DR; + } if (!(val & (1 << 1)) || !(val & (1 << 0))) { if (s->rxi) { @@ -176,9 +189,8 @@ static void sh_serial_write(void *opaque, hwaddr offs, case 0x24: /* LSR */ return; } - } - else { - switch(offs) { + } else { + switch (offs) { #if 0 case 0x0c: ret = s->dr; @@ -192,20 +204,20 @@ static void sh_serial_write(void *opaque, hwaddr offs, return; } } - - fprintf(stderr, "sh_serial: unsupported write to 0x%02" - HWADDR_PRIx "\n", offs); - abort(); + qemu_log_mask(LOG_GUEST_ERROR, + "%s: unsupported write to 0x%02" HWADDR_PRIx "\n", + __func__, offs); } static uint64_t sh_serial_read(void *opaque, hwaddr offs, unsigned size) { - sh_serial_state *s = opaque; - uint32_t ret = ~0; + SHSerialState *s = opaque; + DeviceState *d = DEVICE(s); + uint32_t ret = UINT32_MAX; #if 0 - switch(offs) { + switch (offs) { case 0x00: ret = s->smr; break; @@ -221,7 +233,7 @@ static uint64_t sh_serial_read(void *opaque, hwaddr offs, } #endif if (s->feat & SH_SERIAL_FEAT_SCIF) { - switch(offs) { + switch (offs) { case 0x00: /* SMR */ ret = s->smr; break; @@ -230,29 +242,37 @@ static uint64_t sh_serial_read(void *opaque, hwaddr offs, break; case 0x10: /* FSR */ ret = 0; - if (s->flags & SH_SERIAL_FLAG_TEND) + if (s->flags & SH_SERIAL_FLAG_TEND) { ret |= (1 << 6); - if (s->flags & SH_SERIAL_FLAG_TDE) + } + if (s->flags & SH_SERIAL_FLAG_TDE) { ret |= (1 << 5); - if (s->flags & SH_SERIAL_FLAG_BRK) + } + if (s->flags & SH_SERIAL_FLAG_BRK) { ret |= (1 << 4); - if (s->flags & SH_SERIAL_FLAG_RDF) + } + if (s->flags & SH_SERIAL_FLAG_RDF) { ret |= (1 << 1); - if (s->flags & SH_SERIAL_FLAG_DR) + } + if (s->flags & SH_SERIAL_FLAG_DR) { ret |= (1 << 0); + } - if (s->scr & (1 << 5)) + if (s->scr & (1 << 5)) { s->flags |= SH_SERIAL_FLAG_TDE | SH_SERIAL_FLAG_TEND; + } break; case 0x14: if (s->rx_cnt > 0) { ret = s->rx_fifo[s->rx_tail++]; s->rx_cnt--; - if (s->rx_tail == SH_RX_FIFO_LENGTH) + if (s->rx_tail == SH_RX_FIFO_LENGTH) { s->rx_tail = 0; - if (s->rx_cnt < s->rtrg) + } + if (s->rx_cnt < s->rtrg) { s->flags &= ~SH_SERIAL_FLAG_RDF; + } } break; case 0x18: @@ -268,9 +288,8 @@ static uint64_t sh_serial_read(void *opaque, hwaddr offs, ret = 0; break; } - } - else { - switch(offs) { + } else { + switch (offs) { #if 0 case 0x0c: ret = s->dr; @@ -287,40 +306,39 @@ static uint64_t sh_serial_read(void *opaque, hwaddr offs, break; } } -#ifdef DEBUG_SERIAL - printf("sh_serial: read offs=0x%02x val=0x%x\n", - offs, ret); -#endif + trace_sh_serial_read(d->id, size, offs, ret); - if (ret & ~((1 << 16) - 1)) { - fprintf(stderr, "sh_serial: unsupported read from 0x%02" - HWADDR_PRIx "\n", offs); - abort(); + if (ret > UINT16_MAX) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: unsupported read from 0x%02" HWADDR_PRIx "\n", + __func__, offs); + ret = 0; } return ret; } -static int sh_serial_can_receive(sh_serial_state 
*s) +static int sh_serial_can_receive(SHSerialState *s) { return s->scr & (1 << 4); } -static void sh_serial_receive_break(sh_serial_state *s) +static void sh_serial_receive_break(SHSerialState *s) { - if (s->feat & SH_SERIAL_FEAT_SCIF) + if (s->feat & SH_SERIAL_FEAT_SCIF) { s->sr |= (1 << 4); + } } static int sh_serial_can_receive1(void *opaque) { - sh_serial_state *s = opaque; + SHSerialState *s = opaque; return sh_serial_can_receive(s); } static void sh_serial_timeout_int(void *opaque) { - sh_serial_state *s = opaque; + SHSerialState *s = opaque; s->flags |= SH_SERIAL_FLAG_RDF; if (s->scr & (1 << 6) && s->rxi) { @@ -330,7 +348,7 @@ static void sh_serial_timeout_int(void *opaque) static void sh_serial_receive1(void *opaque, const uint8_t *buf, int size) { - sh_serial_state *s = opaque; + SHSerialState *s = opaque; if (s->feat & SH_SERIAL_FEAT_SCIF) { int i; @@ -344,11 +362,11 @@ static void sh_serial_receive1(void *opaque, const uint8_t *buf, int size) if (s->rx_cnt >= s->rtrg) { s->flags |= SH_SERIAL_FLAG_RDF; if (s->scr & (1 << 6) && s->rxi) { - timer_del(s->fifo_timeout_timer); + timer_del(&s->fifo_timeout_timer); qemu_set_irq(s->rxi, 1); } } else { - timer_mod(s->fifo_timeout_timer, + timer_mod(&s->fifo_timeout_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 15 * s->etu); } } @@ -360,9 +378,10 @@ static void sh_serial_receive1(void *opaque, const uint8_t *buf, int size) static void sh_serial_event(void *opaque, QEMUChrEvent event) { - sh_serial_state *s = opaque; - if (event == CHR_EVENT_BREAK) + SHSerialState *s = opaque; + if (event == CHR_EVENT_BREAK) { sh_serial_receive_break(s); + } } static const MemoryRegionOps sh_serial_ops = { @@ -371,20 +390,10 @@ static const MemoryRegionOps sh_serial_ops = { .endianness = DEVICE_NATIVE_ENDIAN, }; -void sh_serial_init(MemoryRegion *sysmem, - hwaddr base, int feat, - uint32_t freq, Chardev *chr, - qemu_irq eri_source, - qemu_irq rxi_source, - qemu_irq txi_source, - qemu_irq tei_source, - qemu_irq bri_source) +static void sh_serial_reset(DeviceState *dev) { - sh_serial_state *s; + SHSerialState *s = SH_SERIAL(dev); - s = g_malloc0(sizeof(sh_serial_state)); - - s->feat = feat; s->flags = SH_SERIAL_FLAG_TEND | SH_SERIAL_FLAG_TDE; s->rtrg = 1; @@ -393,39 +402,64 @@ void sh_serial_init(MemoryRegion *sysmem, s->scr = 1 << 5; /* pretend that TX is enabled so early printk works */ s->sptr = 0; - if (feat & SH_SERIAL_FEAT_SCIF) { + if (s->feat & SH_SERIAL_FEAT_SCIF) { s->fcr = 0; - } - else { + } else { s->dr = 0xff; } sh_serial_clear_fifo(s); +} - memory_region_init_io(&s->iomem, NULL, &sh_serial_ops, s, - "serial", 0x100000000ULL); +static void sh_serial_realize(DeviceState *d, Error **errp) +{ + SHSerialState *s = SH_SERIAL(d); + MemoryRegion *iomem = g_malloc(sizeof(*iomem)); - memory_region_init_alias(&s->iomem_p4, NULL, "serial-p4", &s->iomem, - 0, 0x28); - memory_region_add_subregion(sysmem, P4ADDR(base), &s->iomem_p4); + assert(d->id); + memory_region_init_io(iomem, OBJECT(d), &sh_serial_ops, s, d->id, 0x28); + sysbus_init_mmio(SYS_BUS_DEVICE(d), iomem); + qdev_init_gpio_out_named(d, &s->eri, "eri", 1); + qdev_init_gpio_out_named(d, &s->rxi, "rxi", 1); + qdev_init_gpio_out_named(d, &s->txi, "txi", 1); + qdev_init_gpio_out_named(d, &s->tei, "tei", 1); + qdev_init_gpio_out_named(d, &s->bri, "bri", 1); - memory_region_init_alias(&s->iomem_a7, NULL, "serial-a7", &s->iomem, - 0, 0x28); - memory_region_add_subregion(sysmem, A7ADDR(base), &s->iomem_a7); - - if (chr) { - qemu_chr_fe_init(&s->chr, chr, &error_abort); + if 
(qemu_chr_fe_backend_connected(&s->chr)) { qemu_chr_fe_set_handlers(&s->chr, sh_serial_can_receive1, sh_serial_receive1, sh_serial_event, NULL, s, NULL, true); } - s->fifo_timeout_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, - sh_serial_timeout_int, s); + timer_init_ns(&s->fifo_timeout_timer, QEMU_CLOCK_VIRTUAL, + sh_serial_timeout_int, s); s->etu = NANOSECONDS_PER_SECOND / 9600; - s->eri = eri_source; - s->rxi = rxi_source; - s->txi = txi_source; - s->tei = tei_source; - s->bri = bri_source; +} + +static void sh_serial_finalize(Object *obj) +{ + SHSerialState *s = SH_SERIAL(obj); + + timer_del(&s->fifo_timeout_timer); +} + +static void sh_serial_init(Object *obj) +{ +} + +static Property sh_serial_properties[] = { + DEFINE_PROP_CHR("chardev", SHSerialState, chr), + DEFINE_PROP_UINT8("features", SHSerialState, feat, 0), + DEFINE_PROP_END_OF_LIST() +}; + +static void sh_serial_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + + device_class_set_props(dc, sh_serial_properties); + dc->realize = sh_serial_realize; + dc->reset = sh_serial_reset; + /* Reason: part of SuperH CPU/SoC, needs to be wired up */ + dc->user_creatable = false; } diff --git a/hw/char/shakti_uart.c b/hw/char/shakti_uart.c index 6870821325..98b142c7df 100644 --- a/hw/char/shakti_uart.c +++ b/hw/char/shakti_uart.c @@ -168,6 +168,7 @@ static void shakti_uart_class_init(ObjectClass *klass, void *data) dc->reset = shakti_uart_reset; dc->realize = shakti_uart_realize; device_class_set_props(dc, shakti_uart_properties); + set_bit(DEVICE_CATEGORY_INPUT, dc->categories); } static const TypeInfo shakti_uart_info = { diff --git a/hw/char/sifive_uart.c b/hw/char/sifive_uart.c index 278e21c434..1c75f792b3 100644 --- a/hw/char/sifive_uart.c +++ b/hw/char/sifive_uart.c @@ -248,6 +248,7 @@ static void sifive_uart_class_init(ObjectClass *oc, void *data) rc->phases.enter = sifive_uart_reset_enter; rc->phases.hold = sifive_uart_reset_hold; device_class_set_props(dc, sifive_uart_properties); + set_bit(DEVICE_CATEGORY_INPUT, dc->categories); } static const TypeInfo sifive_uart_info = { diff --git a/hw/char/trace-events b/hw/char/trace-events index b774832af4..2ecb36232e 100644 --- a/hw/char/trace-events +++ b/hw/char/trace-events @@ -101,3 +101,7 @@ exynos_uart_rx_timeout(uint32_t channel, uint32_t stat, uint32_t intsp) "UART%d: # cadence_uart.c cadence_uart_baudrate(unsigned baudrate) "baudrate %u" + +# sh_serial.c +sh_serial_read(char *id, unsigned size, uint64_t offs, uint64_t val) " %s size %d offs 0x%02" PRIx64 " -> 0x%02" PRIx64 +sh_serial_write(char *id, unsigned size, uint64_t offs, uint64_t val) "%s size %d offs 0x%02" PRIx64 " <- 0x%02" PRIx64 diff --git a/hw/char/virtio-serial-bus.c b/hw/char/virtio-serial-bus.c index dd6bc27b3b..f01ec2137c 100644 --- a/hw/char/virtio-serial-bus.c +++ b/hw/char/virtio-serial-bus.c @@ -1048,8 +1048,8 @@ static void virtio_serial_device_realize(DeviceState *dev, Error **errp) config_size); /* Spawn a new virtio-serial bus on which the ports will ride as devices */ - qbus_create_inplace(&vser->bus, sizeof(vser->bus), TYPE_VIRTIO_SERIAL_BUS, - dev, vdev->bus_name); + qbus_init(&vser->bus, sizeof(vser->bus), TYPE_VIRTIO_SERIAL_BUS, + dev, vdev->bus_name); qbus_set_hotplug_handler(BUS(&vser->bus), OBJECT(vser)); vser->bus.vser = vser; QTAILQ_INIT(&vser->ports); diff --git a/hw/core/bus.c b/hw/core/bus.c index 9cfbc3a687..c7831b5293 100644 --- a/hw/core/bus.c +++ b/hw/core/bus.c @@ -99,7 +99,8 @@ static void bus_reset_child_foreach(Object *obj, ResettableChildCallback cb, } } 
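Illustrative aside, not part of the patch: the sh_serial conversion above keeps the SCIF receive policy of raising RXI once the FIFO fill reaches the trigger level and otherwise re-arming a 15-character-time timeout, with one character time derived as etu = NANOSECONDS_PER_SECOND / 9600. A minimal standalone C sketch of that arithmetic; the trigger level of 8 is an assumption for illustration (the device resets rtrg to 1):

#include <stdio.h>
#include <stdint.h>

#define FIFO_LEN 16                          /* SH_RX_FIFO_LENGTH */
#define NANOSECONDS_PER_SECOND 1000000000LL

int main(void)
{
    int64_t etu = NANOSECONDS_PER_SECOND / 9600; /* one character time, ~104 us */
    int rtrg = 8;                                /* assumed trigger level */
    int rx_cnt = 0;

    for (int byte = 1; byte <= 10; byte++) {
        if (rx_cnt < FIFO_LEN) {
            rx_cnt++;                            /* queue the incoming byte */
        }
        if (rx_cnt >= rtrg) {
            printf("byte %2d: fill %2d -> raise RXI\n", byte, rx_cnt);
        } else {
            printf("byte %2d: fill %2d -> re-arm timeout (+%lld ns)\n",
                   byte, rx_cnt, (long long)(15 * etu));
        }
    }
    return 0;
}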
-static void qbus_init(BusState *bus, DeviceState *parent, const char *name) +static void qbus_init_internal(BusState *bus, DeviceState *parent, + const char *name) { const char *typename = object_get_typename(OBJECT(bus)); BusClass *bc; @@ -151,19 +152,19 @@ static void bus_unparent(Object *obj) bus->parent = NULL; } -void qbus_create_inplace(void *bus, size_t size, const char *typename, - DeviceState *parent, const char *name) +void qbus_init(void *bus, size_t size, const char *typename, + DeviceState *parent, const char *name) { object_initialize(bus, size, typename); - qbus_init(bus, parent, name); + qbus_init_internal(bus, parent, name); } -BusState *qbus_create(const char *typename, DeviceState *parent, const char *name) +BusState *qbus_new(const char *typename, DeviceState *parent, const char *name) { BusState *bus; bus = BUS(object_new(typename)); - qbus_init(bus, parent, name); + qbus_init_internal(bus, parent, name); return bus; } diff --git a/hw/core/cpu-common.c b/hw/core/cpu-common.c index e2f5a64604..9e3241b430 100644 --- a/hw/core/cpu-common.c +++ b/hw/core/cpu-common.c @@ -257,21 +257,6 @@ static int64_t cpu_common_get_arch_id(CPUState *cpu) return cpu->cpu_index; } -static Property cpu_common_props[] = { -#ifndef CONFIG_USER_ONLY - /* Create a memory property for softmmu CPU object, - * so users can wire up its memory. (This can't go in hw/core/cpu.c - * because that file is compiled only once for both user-mode - * and system builds.) The default if no link is set up is to use - * the system address space. - */ - DEFINE_PROP_LINK("memory", CPUState, memory, TYPE_MEMORY_REGION, - MemoryRegion *), -#endif - DEFINE_PROP_BOOL("start-powered-off", CPUState, start_powered_off, false), - DEFINE_PROP_END_OF_LIST(), -}; - static void cpu_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); @@ -286,7 +271,7 @@ static void cpu_class_init(ObjectClass *klass, void *data) dc->realize = cpu_common_realizefn; dc->unrealize = cpu_common_unrealizefn; dc->reset = cpu_common_reset; - device_class_set_props(dc, cpu_common_props); + cpu_class_init_props(dc); /* * Reason: CPUs still need special care by board code: wiring up * IRQs, adding reset handlers, halting non-first CPUs, ... diff --git a/hw/core/gpio.c b/hw/core/gpio.c new file mode 100644 index 0000000000..8e6b4f5edf --- /dev/null +++ b/hw/core/gpio.c @@ -0,0 +1,197 @@ +/* + * qdev GPIO helpers + * + * Copyright (c) 2009 CodeSourcery + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . + */ + +#include "qemu/osdep.h" +#include "hw/qdev-core.h" +#include "hw/irq.h" +#include "qapi/error.h" + +static NamedGPIOList *qdev_get_named_gpio_list(DeviceState *dev, + const char *name) +{ + NamedGPIOList *ngl; + + QLIST_FOREACH(ngl, &dev->gpios, node) { + /* NULL is a valid and matchable name. 
*/ + if (g_strcmp0(name, ngl->name) == 0) { + return ngl; + } + } + + ngl = g_malloc0(sizeof(*ngl)); + ngl->name = g_strdup(name); + QLIST_INSERT_HEAD(&dev->gpios, ngl, node); + return ngl; +} + +void qdev_init_gpio_in_named_with_opaque(DeviceState *dev, + qemu_irq_handler handler, + void *opaque, + const char *name, int n) +{ + int i; + NamedGPIOList *gpio_list = qdev_get_named_gpio_list(dev, name); + + assert(gpio_list->num_out == 0 || !name); + gpio_list->in = qemu_extend_irqs(gpio_list->in, gpio_list->num_in, handler, + opaque, n); + + if (!name) { + name = "unnamed-gpio-in"; + } + for (i = gpio_list->num_in; i < gpio_list->num_in + n; i++) { + gchar *propname = g_strdup_printf("%s[%u]", name, i); + + object_property_add_child(OBJECT(dev), propname, + OBJECT(gpio_list->in[i])); + g_free(propname); + } + + gpio_list->num_in += n; +} + +void qdev_init_gpio_in(DeviceState *dev, qemu_irq_handler handler, int n) +{ + qdev_init_gpio_in_named(dev, handler, NULL, n); +} + +void qdev_init_gpio_out_named(DeviceState *dev, qemu_irq *pins, + const char *name, int n) +{ + int i; + NamedGPIOList *gpio_list = qdev_get_named_gpio_list(dev, name); + + assert(gpio_list->num_in == 0 || !name); + + if (!name) { + name = "unnamed-gpio-out"; + } + memset(pins, 0, sizeof(*pins) * n); + for (i = 0; i < n; ++i) { + gchar *propname = g_strdup_printf("%s[%u]", name, + gpio_list->num_out + i); + + object_property_add_link(OBJECT(dev), propname, TYPE_IRQ, + (Object **)&pins[i], + object_property_allow_set_link, + OBJ_PROP_LINK_STRONG); + g_free(propname); + } + gpio_list->num_out += n; +} + +void qdev_init_gpio_out(DeviceState *dev, qemu_irq *pins, int n) +{ + qdev_init_gpio_out_named(dev, pins, NULL, n); +} + +qemu_irq qdev_get_gpio_in_named(DeviceState *dev, const char *name, int n) +{ + NamedGPIOList *gpio_list = qdev_get_named_gpio_list(dev, name); + + assert(n >= 0 && n < gpio_list->num_in); + return gpio_list->in[n]; +} + +qemu_irq qdev_get_gpio_in(DeviceState *dev, int n) +{ + return qdev_get_gpio_in_named(dev, NULL, n); +} + +void qdev_connect_gpio_out_named(DeviceState *dev, const char *name, int n, + qemu_irq pin) +{ + char *propname = g_strdup_printf("%s[%d]", + name ? name : "unnamed-gpio-out", n); + if (pin && !OBJECT(pin)->parent) { + /* We need a name for object_property_set_link to work */ + object_property_add_child(container_get(qdev_get_machine(), + "/unattached"), + "non-qdev-gpio[*]", OBJECT(pin)); + } + object_property_set_link(OBJECT(dev), propname, OBJECT(pin), &error_abort); + g_free(propname); +} + +qemu_irq qdev_get_gpio_out_connector(DeviceState *dev, const char *name, int n) +{ + g_autofree char *propname = g_strdup_printf("%s[%d]", + name ? name : "unnamed-gpio-out", n); + + qemu_irq ret = (qemu_irq)object_property_get_link(OBJECT(dev), propname, + NULL); + + return ret; +} + +/* disconnect a GPIO output, returning the disconnected input (if any) */ + +static qemu_irq qdev_disconnect_gpio_out_named(DeviceState *dev, + const char *name, int n) +{ + char *propname = g_strdup_printf("%s[%d]", + name ? 
name : "unnamed-gpio-out", n); + + qemu_irq ret = (qemu_irq)object_property_get_link(OBJECT(dev), propname, + NULL); + if (ret) { + object_property_set_link(OBJECT(dev), propname, NULL, NULL); + } + g_free(propname); + return ret; +} + +qemu_irq qdev_intercept_gpio_out(DeviceState *dev, qemu_irq icpt, + const char *name, int n) +{ + qemu_irq disconnected = qdev_disconnect_gpio_out_named(dev, name, n); + qdev_connect_gpio_out_named(dev, name, n, icpt); + return disconnected; +} + +void qdev_connect_gpio_out(DeviceState *dev, int n, qemu_irq pin) +{ + qdev_connect_gpio_out_named(dev, NULL, n, pin); +} + +void qdev_pass_gpios(DeviceState *dev, DeviceState *container, + const char *name) +{ + int i; + NamedGPIOList *ngl = qdev_get_named_gpio_list(dev, name); + + for (i = 0; i < ngl->num_in; i++) { + const char *nm = ngl->name ? ngl->name : "unnamed-gpio-in"; + char *propname = g_strdup_printf("%s[%d]", nm, i); + + object_property_add_alias(OBJECT(container), propname, + OBJECT(dev), propname); + g_free(propname); + } + for (i = 0; i < ngl->num_out; i++) { + const char *nm = ngl->name ? ngl->name : "unnamed-gpio-out"; + char *propname = g_strdup_printf("%s[%d]", nm, i); + + object_property_add_alias(OBJECT(container), propname, + OBJECT(dev), propname); + g_free(propname); + } + QLIST_REMOVE(ngl, node); + QLIST_INSERT_HEAD(&container->gpios, ngl, node); +} diff --git a/hw/core/hotplug-stubs.c b/hw/core/hotplug-stubs.c new file mode 100644 index 0000000000..7aadaa29bd --- /dev/null +++ b/hw/core/hotplug-stubs.c @@ -0,0 +1,34 @@ +/* + * Hotplug handler stubs + * + * Copyright (c) Red Hat + * + * Authors: + * Philippe Mathieu-Daudé , + * + * SPDX-License-Identifier: GPL-2.0-or-later + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
+ */ +#include "qemu/osdep.h" +#include "hw/qdev-core.h" + +HotplugHandler *qdev_get_hotplug_handler(DeviceState *dev) +{ + return NULL; +} + +void hotplug_handler_pre_plug(HotplugHandler *plug_handler, + DeviceState *plugged_dev, + Error **errp) +{ + g_assert_not_reached(); +} + +void hotplug_handler_plug(HotplugHandler *plug_handler, + DeviceState *plugged_dev, + Error **errp) +{ + g_assert_not_reached(); +} diff --git a/hw/core/loader.c b/hw/core/loader.c index c623318b73..052a0fd719 100644 --- a/hw/core/loader.c +++ b/hw/core/loader.c @@ -46,6 +46,8 @@ #include "qemu-common.h" #include "qemu/datadir.h" #include "qapi/error.h" +#include "qapi/qapi-commands-machine.h" +#include "qapi/type-helpers.h" #include "trace.h" #include "hw/hw.h" #include "disas/disas.h" @@ -326,7 +328,7 @@ static void *load_at(int fd, off_t offset, size_t size) #define SZ 64 #include "hw/elf_ops.h" -const char *load_elf_strerror(int error) +const char *load_elf_strerror(ssize_t error) { switch (error) { case 0: @@ -402,12 +404,12 @@ fail: } /* return < 0 if error, otherwise the number of bytes loaded in memory */ -int load_elf(const char *filename, - uint64_t (*elf_note_fn)(void *, void *, bool), - uint64_t (*translate_fn)(void *, uint64_t), - void *translate_opaque, uint64_t *pentry, uint64_t *lowaddr, - uint64_t *highaddr, uint32_t *pflags, int big_endian, - int elf_machine, int clear_lsb, int data_swab) +ssize_t load_elf(const char *filename, + uint64_t (*elf_note_fn)(void *, void *, bool), + uint64_t (*translate_fn)(void *, uint64_t), + void *translate_opaque, uint64_t *pentry, uint64_t *lowaddr, + uint64_t *highaddr, uint32_t *pflags, int big_endian, + int elf_machine, int clear_lsb, int data_swab) { return load_elf_as(filename, elf_note_fn, translate_fn, translate_opaque, pentry, lowaddr, highaddr, pflags, big_endian, @@ -415,12 +417,13 @@ int load_elf(const char *filename, } /* return < 0 if error, otherwise the number of bytes loaded in memory */ -int load_elf_as(const char *filename, - uint64_t (*elf_note_fn)(void *, void *, bool), - uint64_t (*translate_fn)(void *, uint64_t), - void *translate_opaque, uint64_t *pentry, uint64_t *lowaddr, - uint64_t *highaddr, uint32_t *pflags, int big_endian, - int elf_machine, int clear_lsb, int data_swab, AddressSpace *as) +ssize_t load_elf_as(const char *filename, + uint64_t (*elf_note_fn)(void *, void *, bool), + uint64_t (*translate_fn)(void *, uint64_t), + void *translate_opaque, uint64_t *pentry, uint64_t *lowaddr, + uint64_t *highaddr, uint32_t *pflags, int big_endian, + int elf_machine, int clear_lsb, int data_swab, + AddressSpace *as) { return load_elf_ram(filename, elf_note_fn, translate_fn, translate_opaque, pentry, lowaddr, highaddr, pflags, big_endian, @@ -428,13 +431,13 @@ int load_elf_as(const char *filename, } /* return < 0 if error, otherwise the number of bytes loaded in memory */ -int load_elf_ram(const char *filename, - uint64_t (*elf_note_fn)(void *, void *, bool), - uint64_t (*translate_fn)(void *, uint64_t), - void *translate_opaque, uint64_t *pentry, uint64_t *lowaddr, - uint64_t *highaddr, uint32_t *pflags, int big_endian, - int elf_machine, int clear_lsb, int data_swab, - AddressSpace *as, bool load_rom) +ssize_t load_elf_ram(const char *filename, + uint64_t (*elf_note_fn)(void *, void *, bool), + uint64_t (*translate_fn)(void *, uint64_t), + void *translate_opaque, uint64_t *pentry, + uint64_t *lowaddr, uint64_t *highaddr, uint32_t *pflags, + int big_endian, int elf_machine, int clear_lsb, + int data_swab, AddressSpace *as, bool load_rom) { 
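Illustrative aside, not part of the patch: the loader.c hunk below widens the load_elf*() return type from int to ssize_t while keeping the "< 0 on error, otherwise bytes loaded" convention, presumably so byte counts that do not fit in an int can be reported. A standalone sketch of the difference, assuming a 64-bit ssize_t:

#include <stdio.h>
#include <stdint.h>
#include <sys/types.h>

int main(void)
{
    size_t loaded = (size_t)INT32_MAX + 1;   /* an image just over 2 GiB */
    int as_int = (int)loaded;                /* old return type: does not fit */
    ssize_t as_ssize = (ssize_t)loaded;      /* new return type */

    printf("as int:     %d\n", as_int);      /* typically wraps negative */
    printf("as ssize_t: %zd\n", as_ssize);   /* 2147483648 */
    return 0;
}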
return load_elf_ram_sym(filename, elf_note_fn, translate_fn, translate_opaque, @@ -444,16 +447,17 @@ int load_elf_ram(const char *filename, } /* return < 0 if error, otherwise the number of bytes loaded in memory */ -int load_elf_ram_sym(const char *filename, - uint64_t (*elf_note_fn)(void *, void *, bool), - uint64_t (*translate_fn)(void *, uint64_t), - void *translate_opaque, uint64_t *pentry, - uint64_t *lowaddr, uint64_t *highaddr, uint32_t *pflags, - int big_endian, int elf_machine, - int clear_lsb, int data_swab, - AddressSpace *as, bool load_rom, symbol_fn_t sym_cb) +ssize_t load_elf_ram_sym(const char *filename, + uint64_t (*elf_note_fn)(void *, void *, bool), + uint64_t (*translate_fn)(void *, uint64_t), + void *translate_opaque, uint64_t *pentry, + uint64_t *lowaddr, uint64_t *highaddr, + uint32_t *pflags, int big_endian, int elf_machine, + int clear_lsb, int data_swab, + AddressSpace *as, bool load_rom, symbol_fn_t sym_cb) { - int fd, data_order, target_data_order, must_swab, ret = ELF_LOAD_FAILED; + int fd, data_order, target_data_order, must_swab; + ssize_t ret = ELF_LOAD_FAILED; uint8_t e_ident[EI_NIDENT]; fd = open(filename, O_RDONLY | O_BINARY); @@ -1472,32 +1476,35 @@ void *rom_ptr_for_as(AddressSpace *as, hwaddr addr, size_t size) return cbdata.rom; } -void hmp_info_roms(Monitor *mon, const QDict *qdict) +HumanReadableText *qmp_x_query_roms(Error **errp) { Rom *rom; + g_autoptr(GString) buf = g_string_new(""); QTAILQ_FOREACH(rom, &roms, next) { if (rom->mr) { - monitor_printf(mon, "%s" - " size=0x%06zx name=\"%s\"\n", - memory_region_name(rom->mr), - rom->romsize, - rom->name); + g_string_append_printf(buf, "%s" + " size=0x%06zx name=\"%s\"\n", + memory_region_name(rom->mr), + rom->romsize, + rom->name); } else if (!rom->fw_file) { - monitor_printf(mon, "addr=" TARGET_FMT_plx - " size=0x%06zx mem=%s name=\"%s\"\n", - rom->addr, rom->romsize, - rom->isrom ? "rom" : "ram", - rom->name); + g_string_append_printf(buf, "addr=" TARGET_FMT_plx + " size=0x%06zx mem=%s name=\"%s\"\n", + rom->addr, rom->romsize, + rom->isrom ? "rom" : "ram", + rom->name); } else { - monitor_printf(mon, "fw=%s/%s" - " size=0x%06zx name=\"%s\"\n", - rom->fw_dir, - rom->fw_file, - rom->romsize, - rom->name); + g_string_append_printf(buf, "fw=%s/%s" + " size=0x%06zx name=\"%s\"\n", + rom->fw_dir, + rom->fw_file, + rom->romsize, + rom->name); } } + + return human_readable_text_from_str(buf); } typedef enum HexRecord HexRecord; diff --git a/hw/core/machine-hmp-cmds.c b/hw/core/machine-hmp-cmds.c index 76b22b00d6..4e2f319aeb 100644 --- a/hw/core/machine-hmp-cmds.c +++ b/hw/core/machine-hmp-cmds.c @@ -53,8 +53,7 @@ void hmp_hotpluggable_cpus(Monitor *mon, const QDict *qdict) HotpluggableCPUList *saved = l; CpuInstanceProperties *c; - if (err != NULL) { - hmp_handle_error(mon, err); + if (hmp_handle_error(mon, err)) { return; } @@ -131,38 +130,3 @@ void hmp_info_memdev(Monitor *mon, const QDict *qdict) qapi_free_MemdevList(memdev_list); hmp_handle_error(mon, err); } - -void hmp_info_numa(Monitor *mon, const QDict *qdict) -{ - int i, nb_numa_nodes; - NumaNodeMem *node_mem; - CpuInfoFastList *cpu_list, *cpu; - MachineState *ms = MACHINE(qdev_get_machine()); - - nb_numa_nodes = ms->numa_state ? 
ms->numa_state->num_nodes : 0; - monitor_printf(mon, "%d nodes\n", nb_numa_nodes); - if (!nb_numa_nodes) { - return; - } - - cpu_list = qmp_query_cpus_fast(&error_abort); - node_mem = g_new0(NumaNodeMem, nb_numa_nodes); - - query_numa_node_mem(node_mem, ms); - for (i = 0; i < nb_numa_nodes; i++) { - monitor_printf(mon, "node %d cpus:", i); - for (cpu = cpu_list; cpu; cpu = cpu->next) { - if (cpu->value->has_props && cpu->value->props->has_node_id && - cpu->value->props->node_id == i) { - monitor_printf(mon, " %" PRIi64, cpu->value->cpu_index); - } - } - monitor_printf(mon, "\n"); - monitor_printf(mon, "node %d size: %" PRId64 " MB\n", i, - node_mem[i].node_mem >> 20); - monitor_printf(mon, "node %d plugged: %" PRId64 " MB\n", i, - node_mem[i].node_plugged_mem >> 20); - } - qapi_free_CpuInfoFastList(cpu_list); - g_free(node_mem); -} diff --git a/hw/core/machine-qmp-cmds.c b/hw/core/machine-qmp-cmds.c index 216fdfaf3a..4f4ab30f8c 100644 --- a/hw/core/machine-qmp-cmds.c +++ b/hw/core/machine-qmp-cmds.c @@ -15,6 +15,7 @@ #include "qapi/qmp/qerror.h" #include "qapi/qmp/qobject.h" #include "qapi/qobject-input-visitor.h" +#include "qapi/type-helpers.h" #include "qemu/main-loop.h" #include "qom/qom-qobject.h" #include "sysemu/hostmem.h" @@ -204,3 +205,42 @@ MemdevList *qmp_query_memdev(Error **errp) object_child_foreach(obj, query_memdev, &list); return list; } + +HumanReadableText *qmp_x_query_numa(Error **errp) +{ + g_autoptr(GString) buf = g_string_new(""); + int i, nb_numa_nodes; + NumaNodeMem *node_mem; + CpuInfoFastList *cpu_list, *cpu; + MachineState *ms = MACHINE(qdev_get_machine()); + + nb_numa_nodes = ms->numa_state ? ms->numa_state->num_nodes : 0; + g_string_append_printf(buf, "%d nodes\n", nb_numa_nodes); + if (!nb_numa_nodes) { + goto done; + } + + cpu_list = qmp_query_cpus_fast(&error_abort); + node_mem = g_new0(NumaNodeMem, nb_numa_nodes); + + query_numa_node_mem(node_mem, ms); + for (i = 0; i < nb_numa_nodes; i++) { + g_string_append_printf(buf, "node %d cpus:", i); + for (cpu = cpu_list; cpu; cpu = cpu->next) { + if (cpu->value->has_props && cpu->value->props->has_node_id && + cpu->value->props->node_id == i) { + g_string_append_printf(buf, " %" PRIi64, cpu->value->cpu_index); + } + } + g_string_append_printf(buf, "\n"); + g_string_append_printf(buf, "node %d size: %" PRId64 " MB\n", i, + node_mem[i].node_mem >> 20); + g_string_append_printf(buf, "node %d plugged: %" PRId64 " MB\n", i, + node_mem[i].node_plugged_mem >> 20); + } + qapi_free_CpuInfoFastList(cpu_list); + g_free(node_mem); + + done: + return human_readable_text_from_str(buf); +} diff --git a/hw/core/machine-smp.c b/hw/core/machine-smp.c new file mode 100644 index 0000000000..116a0cbbfa --- /dev/null +++ b/hw/core/machine-smp.c @@ -0,0 +1,181 @@ +/* + * QEMU Machine core (related to -smp parsing) + * + * Copyright (c) 2021 Huawei Technologies Co., Ltd + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, + * or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . 
+ */ + +#include "qemu/osdep.h" +#include "hw/boards.h" +#include "qapi/error.h" + +/* + * Report information of a machine's supported CPU topology hierarchy. + * Topology members will be ordered from the largest to the smallest + * in the string. + */ +static char *cpu_hierarchy_to_string(MachineState *ms) +{ + MachineClass *mc = MACHINE_GET_CLASS(ms); + GString *s = g_string_new(NULL); + + g_string_append_printf(s, "sockets (%u)", ms->smp.sockets); + + if (mc->smp_props.dies_supported) { + g_string_append_printf(s, " * dies (%u)", ms->smp.dies); + } + + g_string_append_printf(s, " * cores (%u)", ms->smp.cores); + g_string_append_printf(s, " * threads (%u)", ms->smp.threads); + + return g_string_free(s, false); +} + +/* + * smp_parse - Generic function used to parse the given SMP configuration + * + * Any missing parameter in "cpus/maxcpus/sockets/cores/threads" will be + * automatically computed based on the provided ones. + * + * In the calculation of omitted sockets/cores/threads: we prefer sockets + * over cores over threads before 6.2, while preferring cores over sockets + * over threads since 6.2. + * + * In the calculation of cpus/maxcpus: When both maxcpus and cpus are omitted, + * maxcpus will be computed from the given parameters and cpus will be set + * equal to maxcpus. When only one of maxcpus and cpus is given then the + * omitted one will be set to its given counterpart's value. Both maxcpus and + * cpus may be specified, but maxcpus must be equal to or greater than cpus. + * + * For compatibility, apart from the parameters that will be computed, newly + * introduced topology members which are likely to be target specific should + * be directly set as 1 if they are omitted (e.g. dies for PC since 4.1). + */ +void smp_parse(MachineState *ms, SMPConfiguration *config, Error **errp) +{ + MachineClass *mc = MACHINE_GET_CLASS(ms); + unsigned cpus = config->has_cpus ? config->cpus : 0; + unsigned sockets = config->has_sockets ? config->sockets : 0; + unsigned dies = config->has_dies ? config->dies : 0; + unsigned cores = config->has_cores ? config->cores : 0; + unsigned threads = config->has_threads ? config->threads : 0; + unsigned maxcpus = config->has_maxcpus ? config->maxcpus : 0; + + /* + * Specified CPU topology parameters must be greater than zero, + * explicit configuration like "cpus=0" is not allowed. + */ + if ((config->has_cpus && config->cpus == 0) || + (config->has_sockets && config->sockets == 0) || + (config->has_dies && config->dies == 0) || + (config->has_cores && config->cores == 0) || + (config->has_threads && config->threads == 0) || + (config->has_maxcpus && config->maxcpus == 0)) { + warn_report("Deprecated CPU topology (considered invalid): " + "CPU topology parameters must be greater than zero"); + } + + /* + * If not supported by the machine, a topology parameter must be + * omitted or specified equal to 1. + */ + if (!mc->smp_props.dies_supported && dies > 1) { + error_setg(errp, "dies not supported by this machine's CPU topology"); + return; + } + + dies = dies > 0 ? dies : 1; + + /* compute missing values based on the provided ones */ + if (cpus == 0 && maxcpus == 0) { + sockets = sockets > 0 ? sockets : 1; + cores = cores > 0 ? cores : 1; + threads = threads > 0 ? threads : 1; + } else { + maxcpus = maxcpus > 0 ? maxcpus : cpus; + + if (mc->smp_props.prefer_sockets) { + /* prefer sockets over cores before 6.2 */ + if (sockets == 0) { + cores = cores > 0 ? cores : 1; + threads = threads > 0 ? 
threads : 1; + sockets = maxcpus / (dies * cores * threads); + } else if (cores == 0) { + threads = threads > 0 ? threads : 1; + cores = maxcpus / (sockets * dies * threads); + } + } else { + /* prefer cores over sockets since 6.2 */ + if (cores == 0) { + sockets = sockets > 0 ? sockets : 1; + threads = threads > 0 ? threads : 1; + cores = maxcpus / (sockets * dies * threads); + } else if (sockets == 0) { + threads = threads > 0 ? threads : 1; + sockets = maxcpus / (dies * cores * threads); + } + } + + /* try to calculate omitted threads at last */ + if (threads == 0) { + threads = maxcpus / (sockets * dies * cores); + } + } + + maxcpus = maxcpus > 0 ? maxcpus : sockets * dies * cores * threads; + cpus = cpus > 0 ? cpus : maxcpus; + + ms->smp.cpus = cpus; + ms->smp.sockets = sockets; + ms->smp.dies = dies; + ms->smp.cores = cores; + ms->smp.threads = threads; + ms->smp.max_cpus = maxcpus; + + /* sanity-check of the computed topology */ + if (sockets * dies * cores * threads != maxcpus) { + g_autofree char *topo_msg = cpu_hierarchy_to_string(ms); + error_setg(errp, "Invalid CPU topology: " + "product of the hierarchy must match maxcpus: " + "%s != maxcpus (%u)", + topo_msg, maxcpus); + return; + } + + if (maxcpus < cpus) { + g_autofree char *topo_msg = cpu_hierarchy_to_string(ms); + error_setg(errp, "Invalid CPU topology: " + "maxcpus must be equal to or greater than smp: " + "%s == maxcpus (%u) < smp_cpus (%u)", + topo_msg, maxcpus, cpus); + return; + } + + if (ms->smp.cpus < mc->min_cpus) { + error_setg(errp, "Invalid SMP CPUs %d. The min CPUs " + "supported by machine '%s' is %d", + ms->smp.cpus, + mc->name, mc->min_cpus); + return; + } + + if (ms->smp.max_cpus > mc->max_cpus) { + error_setg(errp, "Invalid SMP CPUs %d. The max CPUs " + "supported by machine '%s' is %d", + ms->smp.max_cpus, + mc->name, mc->max_cpus); + return; + } +} diff --git a/hw/core/machine.c b/hw/core/machine.c index 067f42b528..26ec54e726 100644 --- a/hw/core/machine.c +++ b/hw/core/machine.c @@ -37,7 +37,9 @@ #include "hw/virtio/virtio.h" #include "hw/virtio/virtio-pci.h" -GlobalProperty hw_compat_6_1[] = {}; +GlobalProperty hw_compat_6_1[] = { + { "vhost-user-vsock-device", "seqpacket", "off" }, +}; const size_t hw_compat_6_1_len = G_N_ELEMENTS(hw_compat_6_1); GlobalProperty hw_compat_6_0[] = { @@ -46,6 +48,7 @@ GlobalProperty hw_compat_6_0[] = { { "nvme-ns", "eui64-default", "off"}, { "e1000", "init-vet", "off" }, { "e1000e", "init-vet", "off" }, + { "vhost-vsock-device", "seqpacket", "off" }, }; const size_t hw_compat_6_0_len = G_N_ELEMENTS(hw_compat_6_0); @@ -53,7 +56,7 @@ GlobalProperty hw_compat_5_2[] = { { "ICH9-LPC", "smm-compat", "on"}, { "PIIX4_PM", "smm-compat", "on"}, { "virtio-blk-device", "report-discard-granularity", "off" }, - { "virtio-net-pci", "vectors", "3"}, + { "virtio-net-pci-base", "vectors", "3"}, }; const size_t hw_compat_5_2_len = G_N_ELEMENTS(hw_compat_5_2); @@ -545,35 +548,30 @@ void machine_class_allow_dynamic_sysbus_dev(MachineClass *mc, const char *type) bool device_is_dynamic_sysbus(MachineClass *mc, DeviceState *dev) { - bool allowed = false; - strList *wl; Object *obj = OBJECT(dev); if (!object_dynamic_cast(obj, TYPE_SYS_BUS_DEVICE)) { return false; } + return device_type_is_dynamic_sysbus(mc, object_get_typename(obj)); +} + +bool device_type_is_dynamic_sysbus(MachineClass *mc, const char *type) +{ + bool allowed = false; + strList *wl; + ObjectClass *klass = object_class_by_name(type); + for (wl = mc->allowed_dynamic_sysbus_devices; !allowed && wl; wl = wl->next) { - allowed |= 
!!object_dynamic_cast(obj, wl->value); + allowed |= !!object_class_dynamic_cast(klass, wl->value); } return allowed; } -static void validate_sysbus_device(SysBusDevice *sbdev, void *opaque) -{ - MachineState *machine = opaque; - MachineClass *mc = MACHINE_GET_CLASS(machine); - - if (!device_is_dynamic_sysbus(mc, DEVICE(sbdev))) { - error_report("Option '-device %s' cannot be handled by this machine", - object_class_get_name(object_get_class(OBJECT(sbdev)))); - exit(1); - } -} - static char *machine_get_memdev(Object *obj, Error **errp) { MachineState *ms = MACHINE(obj); @@ -589,17 +587,6 @@ static void machine_set_memdev(Object *obj, const char *value, Error **errp) ms->ram_memdev_id = g_strdup(value); } -static void machine_init_notify(Notifier *notifier, void *data) -{ - MachineState *machine = MACHINE(qdev_get_machine()); - - /* - * Loop through all dynamically created sysbus devices and check if they are - * all allowed. If a device is not allowed, error out. - */ - foreach_dynamic_sysbus_device(validate_sysbus_device, machine); -} - HotpluggableCPUList *machine_query_hotpluggable_cpus(MachineState *machine) { int i; @@ -746,76 +733,16 @@ void machine_set_cpu_numa_node(MachineState *machine, } } -static void smp_parse(MachineState *ms, SMPConfiguration *config, Error **errp) -{ - unsigned cpus = config->has_cpus ? config->cpus : 0; - unsigned sockets = config->has_sockets ? config->sockets : 0; - unsigned cores = config->has_cores ? config->cores : 0; - unsigned threads = config->has_threads ? config->threads : 0; - - if (config->has_dies && config->dies != 0 && config->dies != 1) { - error_setg(errp, "dies not supported by this machine's CPU topology"); - return; - } - - /* compute missing values, prefer sockets over cores over threads */ - if (cpus == 0 || sockets == 0) { - cores = cores > 0 ? cores : 1; - threads = threads > 0 ? threads : 1; - if (cpus == 0) { - sockets = sockets > 0 ? sockets : 1; - cpus = cores * threads * sockets; - } else { - ms->smp.max_cpus = config->has_maxcpus ? config->maxcpus : cpus; - sockets = ms->smp.max_cpus / (cores * threads); - } - } else if (cores == 0) { - threads = threads > 0 ? threads : 1; - cores = cpus / (sockets * threads); - cores = cores > 0 ? cores : 1; - } else if (threads == 0) { - threads = cpus / (cores * sockets); - threads = threads > 0 ? threads : 1; - } else if (sockets * cores * threads < cpus) { - error_setg(errp, "cpu topology: " - "sockets (%u) * cores (%u) * threads (%u) < " - "smp_cpus (%u)", - sockets, cores, threads, cpus); - return; - } - - ms->smp.max_cpus = config->has_maxcpus ? 
config->maxcpus : cpus; - - if (ms->smp.max_cpus < cpus) { - error_setg(errp, "maxcpus must be equal to or greater than smp"); - return; - } - - if (sockets * cores * threads != ms->smp.max_cpus) { - error_setg(errp, "Invalid CPU topology: " - "sockets (%u) * cores (%u) * threads (%u) " - "!= maxcpus (%u)", - sockets, cores, threads, - ms->smp.max_cpus); - return; - } - - ms->smp.cpus = cpus; - ms->smp.cores = cores; - ms->smp.threads = threads; - ms->smp.sockets = sockets; -} - static void machine_get_smp(Object *obj, Visitor *v, const char *name, void *opaque, Error **errp) { MachineState *ms = MACHINE(obj); SMPConfiguration *config = &(SMPConfiguration){ - .has_cores = true, .cores = ms->smp.cores, + .has_cpus = true, .cpus = ms->smp.cpus, .has_sockets = true, .sockets = ms->smp.sockets, .has_dies = true, .dies = ms->smp.dies, + .has_cores = true, .cores = ms->smp.cores, .has_threads = true, .threads = ms->smp.threads, - .has_cpus = true, .cpus = ms->smp.cpus, .has_maxcpus = true, .maxcpus = ms->smp.max_cpus, }; if (!visit_type_SMPConfiguration(v, name, &config, &error_abort)) { @@ -826,35 +753,14 @@ static void machine_get_smp(Object *obj, Visitor *v, const char *name, static void machine_set_smp(Object *obj, Visitor *v, const char *name, void *opaque, Error **errp) { - MachineClass *mc = MACHINE_GET_CLASS(obj); MachineState *ms = MACHINE(obj); - SMPConfiguration *config; - ERRP_GUARD(); + g_autoptr(SMPConfiguration) config = NULL; if (!visit_type_SMPConfiguration(v, name, &config, errp)) { return; } - mc->smp_parse(ms, config, errp); - if (*errp) { - goto out_free; - } - - /* sanity-check smp_cpus and max_cpus against mc */ - if (ms->smp.cpus < mc->min_cpus) { - error_setg(errp, "Invalid SMP CPUs %d. The min CPUs " - "supported by machine '%s' is %d", - ms->smp.cpus, - mc->name, mc->min_cpus); - } else if (ms->smp.max_cpus > mc->max_cpus) { - error_setg(errp, "Invalid SMP CPUs %d. The max CPUs " - "supported by machine '%s' is %d", - current_machine->smp.max_cpus, - mc->name, mc->max_cpus); - } - -out_free: - qapi_free_SMPConfiguration(config); + smp_parse(ms, config, errp); } static void machine_class_init(ObjectClass *oc, void *data) @@ -864,7 +770,6 @@ static void machine_class_init(ObjectClass *oc, void *data) /* Default 128 MB as guest ram size */ mc->default_ram_size = 128 * MiB; mc->rom_file_has_mr = true; - mc->smp_parse = smp_parse; /* numa node memory size aligned on 8MB by default. 
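Illustrative aside, not part of the patch: a worked example of the completion rules documented in smp_parse() above (hw/core/machine-smp.c) for the case where maxcpus is given, using the 6.2 preference of cores over sockets. Standalone sketch, not the QEMU code:

#include <stdio.h>

int main(void)
{
    /* corresponds to "-smp maxcpus=16,threads=4" with everything else omitted */
    unsigned cpus = 0, sockets = 0, dies = 0, cores = 0, threads = 4;
    unsigned maxcpus = 16;

    dies = dies > 0 ? dies : 1;
    maxcpus = maxcpus > 0 ? maxcpus : cpus;

    /* prefer cores over sockets: cores is computed first when omitted */
    if (cores == 0) {
        sockets = sockets > 0 ? sockets : 1;
        threads = threads > 0 ? threads : 1;
        cores = maxcpus / (sockets * dies * threads);
    } else if (sockets == 0) {
        threads = threads > 0 ? threads : 1;
        sockets = maxcpus / (dies * cores * threads);
    }
    if (threads == 0) {
        threads = maxcpus / (sockets * dies * cores);
    }
    cpus = cpus > 0 ? cpus : maxcpus;

    printf("sockets=%u dies=%u cores=%u threads=%u cpus=%u maxcpus=%u\n",
           sockets, dies, cores, threads, cpus, maxcpus);
    /* 1 socket * 1 die * 4 cores * 4 threads == maxcpus (16) */
    return (sockets * dies * cores * threads == maxcpus) ? 0 : 1;
}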
* On Linux, each node's border has to be 8MB aligned @@ -1021,17 +926,13 @@ static void machine_initfn(Object *obj) "Table (HMAT)"); } - /* Register notifier when init is done for sysbus sanity checks */ - ms->sysbus_notifier.notify = machine_init_notify; - qemu_add_machine_init_done_notifier(&ms->sysbus_notifier); - /* default to mc->default_cpus */ ms->smp.cpus = mc->default_cpus; ms->smp.max_cpus = mc->default_cpus; - ms->smp.cores = 1; - ms->smp.dies = 1; - ms->smp.threads = 1; ms->smp.sockets = 1; + ms->smp.dies = 1; + ms->smp.cores = 1; + ms->smp.threads = 1; } static void machine_finalize(Object *obj) @@ -1077,6 +978,9 @@ static char *cpu_slot_to_string(const CPUArchId *cpu) g_string_append_printf(s, "socket-id: %"PRId64, cpu->props.socket_id); } if (cpu->props.has_die_id) { + if (s->len) { + g_string_append_printf(s, ", "); + } g_string_append_printf(s, "die-id: %"PRId64, cpu->props.die_id); } if (cpu->props.has_core_id) { diff --git a/hw/core/meson.build b/hw/core/meson.build index 18f44fb7c2..0f884d6fd4 100644 --- a/hw/core/meson.build +++ b/hw/core/meson.build @@ -1,7 +1,6 @@ # core qdev-related obj files, also used by *-user and unit tests -hwcore_files = files( +hwcore_ss.add(files( 'bus.c', - 'hotplug.c', 'qdev-properties.c', 'qdev.c', 'reset.c', @@ -11,22 +10,34 @@ hwcore_files = files( 'irq.c', 'clock.c', 'qdev-clock.c', -) +)) +if have_system + hwcore_ss.add(files( + 'hotplug.c', + 'qdev-hotplug.c', + )) +else + hwcore_ss.add(files( + 'hotplug-stubs.c', + )) +endif common_ss.add(files('cpu-common.c')) -common_ss.add(when: 'CONFIG_FITLOADER', if_true: files('loader-fit.c')) -common_ss.add(when: 'CONFIG_GENERIC_LOADER', if_true: files('generic-loader.c')) -common_ss.add(when: ['CONFIG_GUEST_LOADER', fdt], if_true: files('guest-loader.c')) -common_ss.add(when: 'CONFIG_OR_IRQ', if_true: files('or-irq.c')) -common_ss.add(when: 'CONFIG_PLATFORM_BUS', if_true: files('platform-bus.c')) -common_ss.add(when: 'CONFIG_PTIMER', if_true: files('ptimer.c')) -common_ss.add(when: 'CONFIG_REGISTER', if_true: files('register.c')) -common_ss.add(when: 'CONFIG_SPLIT_IRQ', if_true: files('split-irq.c')) -common_ss.add(when: 'CONFIG_XILINX_AXI', if_true: files('stream.c')) +common_ss.add(files('machine-smp.c')) +softmmu_ss.add(when: 'CONFIG_FITLOADER', if_true: files('loader-fit.c')) +softmmu_ss.add(when: 'CONFIG_GENERIC_LOADER', if_true: files('generic-loader.c')) +softmmu_ss.add(when: ['CONFIG_GUEST_LOADER', fdt], if_true: files('guest-loader.c')) +softmmu_ss.add(when: 'CONFIG_OR_IRQ', if_true: files('or-irq.c')) +softmmu_ss.add(when: 'CONFIG_PLATFORM_BUS', if_true: files('platform-bus.c')) +softmmu_ss.add(when: 'CONFIG_PTIMER', if_true: files('ptimer.c')) +softmmu_ss.add(when: 'CONFIG_REGISTER', if_true: files('register.c')) +softmmu_ss.add(when: 'CONFIG_SPLIT_IRQ', if_true: files('split-irq.c')) +softmmu_ss.add(when: 'CONFIG_XILINX_AXI', if_true: files('stream.c')) softmmu_ss.add(files( 'cpu-sysemu.c', 'fw-path-provider.c', + 'gpio.c', 'loader.c', 'machine-hmp-cmds.c', 'machine.c', diff --git a/hw/core/numa.c b/hw/core/numa.c index 510d096a88..e6050b2273 100644 --- a/hw/core/numa.c +++ b/hw/core/numa.c @@ -756,6 +756,7 @@ static void numa_stat_memory_devices(NumaNodeMem node_mem[]) PCDIMMDeviceInfo *pcdimm_info; VirtioPMEMDeviceInfo *vpi; VirtioMEMDeviceInfo *vmi; + SgxEPCDeviceInfo *se; for (info = info_list; info; info = info->next) { MemoryDeviceInfo *value = info->value; @@ -781,6 +782,12 @@ static void numa_stat_memory_devices(NumaNodeMem node_mem[]) node_mem[vmi->node].node_mem += 
vmi->size; node_mem[vmi->node].node_plugged_mem += vmi->size; break; + case MEMORY_DEVICE_INFO_KIND_SGX_EPC: + se = value->u.sgx_epc.data; + /* TODO: once we support numa, assign to right node */ + node_mem[0].node_mem += se->size; + node_mem[0].node_plugged_mem += se->size; + break; default: g_assert_not_reached(); } diff --git a/hw/core/qdev-hotplug.c b/hw/core/qdev-hotplug.c new file mode 100644 index 0000000000..d495d0e9c7 --- /dev/null +++ b/hw/core/qdev-hotplug.c @@ -0,0 +1,73 @@ +/* + * QDev Hotplug handlers + * + * Copyright (c) Red Hat + * + * SPDX-License-Identifier: GPL-2.0-or-later + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "hw/qdev-core.h" +#include "hw/boards.h" + +HotplugHandler *qdev_get_machine_hotplug_handler(DeviceState *dev) +{ + MachineState *machine; + MachineClass *mc; + Object *m_obj = qdev_get_machine(); + + if (object_dynamic_cast(m_obj, TYPE_MACHINE)) { + machine = MACHINE(m_obj); + mc = MACHINE_GET_CLASS(machine); + if (mc->get_hotplug_handler) { + return mc->get_hotplug_handler(machine, dev); + } + } + + return NULL; +} + +bool qdev_hotplug_allowed(DeviceState *dev, Error **errp) +{ + MachineState *machine; + MachineClass *mc; + Object *m_obj = qdev_get_machine(); + + if (object_dynamic_cast(m_obj, TYPE_MACHINE)) { + machine = MACHINE(m_obj); + mc = MACHINE_GET_CLASS(machine); + if (mc->hotplug_allowed) { + return mc->hotplug_allowed(machine, dev, errp); + } + } + + return true; +} + +HotplugHandler *qdev_get_bus_hotplug_handler(DeviceState *dev) +{ + if (dev->parent_bus) { + return dev->parent_bus->hotplug_handler; + } + return NULL; +} + +HotplugHandler *qdev_get_hotplug_handler(DeviceState *dev) +{ + HotplugHandler *hotplug_ctrl = qdev_get_machine_hotplug_handler(dev); + + if (hotplug_ctrl == NULL && dev->parent_bus) { + hotplug_ctrl = qdev_get_bus_hotplug_handler(dev); + } + return hotplug_ctrl; +} + +/* can be used as ->unplug() callback for the simple cases */ +void qdev_simple_device_unplug_cb(HotplugHandler *hotplug_dev, + DeviceState *dev, Error **errp) +{ + qdev_unrealize(dev); +} diff --git a/hw/core/qdev-properties-system.c b/hw/core/qdev-properties-system.c index e71f5d64d1..a91f60567a 100644 --- a/hw/core/qdev-properties-system.c +++ b/hw/core/qdev-properties-system.c @@ -431,6 +431,12 @@ static void set_netdev(Object *obj, Visitor *v, const char *name, goto out; } + if (peers[i]->info->check_peer_type) { + if (!peers[i]->info->check_peer_type(peers[i], obj->class, errp)) { + goto out; + } + } + ncs[i] = peers[i]; ncs[i]->queue_index = i; } diff --git a/hw/core/qdev.c b/hw/core/qdev.c index cefc5eaa0a..84f3019440 100644 --- a/hw/core/qdev.c +++ b/hw/core/qdev.c @@ -28,11 +28,11 @@ #include "qemu/osdep.h" #include "qapi/error.h" #include "qapi/qapi-events-qdev.h" +#include "qapi/qmp/qdict.h" #include "qapi/qmp/qerror.h" #include "qapi/visitor.h" #include "qemu/error-report.h" #include "qemu/option.h" -#include "hw/hotplug.h" #include "hw/irq.h" #include "hw/qdev-properties.h" #include "hw/boards.h" @@ -211,14 +211,17 @@ void device_listener_unregister(DeviceListener *listener) QTAILQ_REMOVE(&device_listeners, listener, link); } -bool qdev_should_hide_device(QemuOpts *opts) +bool qdev_should_hide_device(const QDict *opts, bool from_json, Error **errp) { + ERRP_GUARD(); DeviceListener *listener; QTAILQ_FOREACH(listener, &device_listeners, link) { if (listener->hide_device) { - if (listener->hide_device(listener, opts)) 
{ + if (listener->hide_device(listener, opts, from_json, errp)) { return true; + } else if (*errp) { + return false; } } } @@ -234,58 +237,6 @@ void qdev_set_legacy_instance_id(DeviceState *dev, int alias_id, dev->alias_required_for_version = required_for_version; } -HotplugHandler *qdev_get_machine_hotplug_handler(DeviceState *dev) -{ - MachineState *machine; - MachineClass *mc; - Object *m_obj = qdev_get_machine(); - - if (object_dynamic_cast(m_obj, TYPE_MACHINE)) { - machine = MACHINE(m_obj); - mc = MACHINE_GET_CLASS(machine); - if (mc->get_hotplug_handler) { - return mc->get_hotplug_handler(machine, dev); - } - } - - return NULL; -} - -bool qdev_hotplug_allowed(DeviceState *dev, Error **errp) -{ - MachineState *machine; - MachineClass *mc; - Object *m_obj = qdev_get_machine(); - - if (object_dynamic_cast(m_obj, TYPE_MACHINE)) { - machine = MACHINE(m_obj); - mc = MACHINE_GET_CLASS(machine); - if (mc->hotplug_allowed) { - return mc->hotplug_allowed(machine, dev, errp); - } - } - - return true; -} - -HotplugHandler *qdev_get_bus_hotplug_handler(DeviceState *dev) -{ - if (dev->parent_bus) { - return dev->parent_bus->hotplug_handler; - } - return NULL; -} - -HotplugHandler *qdev_get_hotplug_handler(DeviceState *dev) -{ - HotplugHandler *hotplug_ctrl = qdev_get_machine_hotplug_handler(dev); - - if (hotplug_ctrl == NULL && dev->parent_bus) { - hotplug_ctrl = qdev_get_bus_hotplug_handler(dev); - } - return hotplug_ctrl; -} - static int qdev_prereset(DeviceState *dev, void *opaque) { trace_qdev_reset_tree(dev, object_get_typename(OBJECT(dev))); @@ -367,13 +318,6 @@ static void device_reset_child_foreach(Object *obj, ResettableChildCallback cb, } } -/* can be used as ->unplug() callback for the simple cases */ -void qdev_simple_device_unplug_cb(HotplugHandler *hotplug_dev, - DeviceState *dev, Error **errp) -{ - qdev_unrealize(dev); -} - bool qdev_realize(DeviceState *dev, BusState *bus, Error **errp) { assert(!dev->realized && !dev->parent_bus); @@ -432,180 +376,6 @@ BusState *qdev_get_parent_bus(DeviceState *dev) return dev->parent_bus; } -static NamedGPIOList *qdev_get_named_gpio_list(DeviceState *dev, - const char *name) -{ - NamedGPIOList *ngl; - - QLIST_FOREACH(ngl, &dev->gpios, node) { - /* NULL is a valid and matchable name. 
*/ - if (g_strcmp0(name, ngl->name) == 0) { - return ngl; - } - } - - ngl = g_malloc0(sizeof(*ngl)); - ngl->name = g_strdup(name); - QLIST_INSERT_HEAD(&dev->gpios, ngl, node); - return ngl; -} - -void qdev_init_gpio_in_named_with_opaque(DeviceState *dev, - qemu_irq_handler handler, - void *opaque, - const char *name, int n) -{ - int i; - NamedGPIOList *gpio_list = qdev_get_named_gpio_list(dev, name); - - assert(gpio_list->num_out == 0 || !name); - gpio_list->in = qemu_extend_irqs(gpio_list->in, gpio_list->num_in, handler, - opaque, n); - - if (!name) { - name = "unnamed-gpio-in"; - } - for (i = gpio_list->num_in; i < gpio_list->num_in + n; i++) { - gchar *propname = g_strdup_printf("%s[%u]", name, i); - - object_property_add_child(OBJECT(dev), propname, - OBJECT(gpio_list->in[i])); - g_free(propname); - } - - gpio_list->num_in += n; -} - -void qdev_init_gpio_in(DeviceState *dev, qemu_irq_handler handler, int n) -{ - qdev_init_gpio_in_named(dev, handler, NULL, n); -} - -void qdev_init_gpio_out_named(DeviceState *dev, qemu_irq *pins, - const char *name, int n) -{ - int i; - NamedGPIOList *gpio_list = qdev_get_named_gpio_list(dev, name); - - assert(gpio_list->num_in == 0 || !name); - - if (!name) { - name = "unnamed-gpio-out"; - } - memset(pins, 0, sizeof(*pins) * n); - for (i = 0; i < n; ++i) { - gchar *propname = g_strdup_printf("%s[%u]", name, - gpio_list->num_out + i); - - object_property_add_link(OBJECT(dev), propname, TYPE_IRQ, - (Object **)&pins[i], - object_property_allow_set_link, - OBJ_PROP_LINK_STRONG); - g_free(propname); - } - gpio_list->num_out += n; -} - -void qdev_init_gpio_out(DeviceState *dev, qemu_irq *pins, int n) -{ - qdev_init_gpio_out_named(dev, pins, NULL, n); -} - -qemu_irq qdev_get_gpio_in_named(DeviceState *dev, const char *name, int n) -{ - NamedGPIOList *gpio_list = qdev_get_named_gpio_list(dev, name); - - assert(n >= 0 && n < gpio_list->num_in); - return gpio_list->in[n]; -} - -qemu_irq qdev_get_gpio_in(DeviceState *dev, int n) -{ - return qdev_get_gpio_in_named(dev, NULL, n); -} - -void qdev_connect_gpio_out_named(DeviceState *dev, const char *name, int n, - qemu_irq pin) -{ - char *propname = g_strdup_printf("%s[%d]", - name ? name : "unnamed-gpio-out", n); - if (pin && !OBJECT(pin)->parent) { - /* We need a name for object_property_set_link to work */ - object_property_add_child(container_get(qdev_get_machine(), - "/unattached"), - "non-qdev-gpio[*]", OBJECT(pin)); - } - object_property_set_link(OBJECT(dev), propname, OBJECT(pin), &error_abort); - g_free(propname); -} - -qemu_irq qdev_get_gpio_out_connector(DeviceState *dev, const char *name, int n) -{ - g_autofree char *propname = g_strdup_printf("%s[%d]", - name ? name : "unnamed-gpio-out", n); - - qemu_irq ret = (qemu_irq)object_property_get_link(OBJECT(dev), propname, - NULL); - - return ret; -} - -/* disconnect a GPIO output, returning the disconnected input (if any) */ - -static qemu_irq qdev_disconnect_gpio_out_named(DeviceState *dev, - const char *name, int n) -{ - char *propname = g_strdup_printf("%s[%d]", - name ? 
name : "unnamed-gpio-out", n); - - qemu_irq ret = (qemu_irq)object_property_get_link(OBJECT(dev), propname, - NULL); - if (ret) { - object_property_set_link(OBJECT(dev), propname, NULL, NULL); - } - g_free(propname); - return ret; -} - -qemu_irq qdev_intercept_gpio_out(DeviceState *dev, qemu_irq icpt, - const char *name, int n) -{ - qemu_irq disconnected = qdev_disconnect_gpio_out_named(dev, name, n); - qdev_connect_gpio_out_named(dev, name, n, icpt); - return disconnected; -} - -void qdev_connect_gpio_out(DeviceState * dev, int n, qemu_irq pin) -{ - qdev_connect_gpio_out_named(dev, NULL, n, pin); -} - -void qdev_pass_gpios(DeviceState *dev, DeviceState *container, - const char *name) -{ - int i; - NamedGPIOList *ngl = qdev_get_named_gpio_list(dev, name); - - for (i = 0; i < ngl->num_in; i++) { - const char *nm = ngl->name ? ngl->name : "unnamed-gpio-in"; - char *propname = g_strdup_printf("%s[%d]", nm, i); - - object_property_add_alias(OBJECT(container), propname, - OBJECT(dev), propname); - g_free(propname); - } - for (i = 0; i < ngl->num_out; i++) { - const char *nm = ngl->name ? ngl->name : "unnamed-gpio-out"; - char *propname = g_strdup_printf("%s[%d]", nm, i); - - object_property_add_alias(OBJECT(container), propname, - OBJECT(dev), propname); - g_free(propname); - } - QLIST_REMOVE(ngl, node); - QLIST_INSERT_HEAD(&container->gpios, ngl, node); -} - BusState *qdev_get_child_bus(DeviceState *dev, const char *name) { BusState *bus; @@ -955,7 +725,8 @@ static void device_finalize(Object *obj) dev->canonical_path = NULL; } - qemu_opts_del(dev->opts); + qobject_unref(dev->opts); + g_free(dev->id); } static void device_class_base_init(ObjectClass *class, void *data) diff --git a/hw/core/sysbus.c b/hw/core/sysbus.c index aaae8e23cc..05c1da3d31 100644 --- a/hw/core/sysbus.c +++ b/hw/core/sysbus.c @@ -340,11 +340,13 @@ static BusState *main_system_bus; static void main_system_bus_create(void) { - /* assign main_system_bus before qbus_create_inplace() - * in order to make "if (bus != sysbus_get_default())" work */ + /* + * assign main_system_bus before qbus_init() + * in order to make "if (bus != sysbus_get_default())" work + */ main_system_bus = g_malloc0(system_bus_info.instance_size); - qbus_create_inplace(main_system_bus, system_bus_info.instance_size, - TYPE_SYSTEM_BUS, NULL, "main-system-bus"); + qbus_init(main_system_bus, system_bus_info.instance_size, + TYPE_SYSTEM_BUS, NULL, "main-system-bus"); OBJECT(main_system_bus)->free = g_free; } diff --git a/hw/display/macfb.c b/hw/display/macfb.c index d8183b9bbd..277d3e6633 100644 --- a/hw/display/macfb.c +++ b/hw/display/macfb.c @@ -20,16 +20,102 @@ #include "qapi/error.h" #include "hw/qdev-properties.h" #include "migration/vmstate.h" +#include "trace.h" -#define VIDEO_BASE 0x00001000 +#define VIDEO_BASE 0x0 #define DAFB_BASE 0x00800000 #define MACFB_PAGE_SIZE 4096 #define MACFB_VRAM_SIZE (4 * MiB) -#define DAFB_RESET 0x200 -#define DAFB_LUT 0x213 +#define DAFB_MODE_VADDR1 0x0 +#define DAFB_MODE_VADDR2 0x4 +#define DAFB_MODE_CTRL1 0x8 +#define DAFB_MODE_CTRL2 0xc +#define DAFB_MODE_SENSE 0x1c +#define DAFB_INTR_MASK 0x104 +#define DAFB_INTR_STAT 0x108 +#define DAFB_INTR_CLEAR 0x10c +#define DAFB_RESET 0x200 +#define DAFB_LUT 0x213 +#define DAFB_INTR_VBL 0x4 + +/* Vertical Blank period (60.15Hz) */ +#define DAFB_INTR_VBL_PERIOD_NS 16625800 + +/* + * Quadra sense codes taken from Apple Technical Note HW26: + * "Macintosh Quadra Built-In Video". 
The sense codes and + * extended sense codes have different meanings: + * + * Sense: + * bit 2: SENSE2 (pin 10) + * bit 1: SENSE1 (pin 7) + * bit 0: SENSE0 (pin 4) + * + * 0 = pin tied to ground + * 1 = pin unconnected + * + * Extended Sense: + * bit 2: pins 4-10 + * bit 1: pins 10-7 + * bit 0: pins 7-4 + * + * 0 = pins tied together + * 1 = pins unconnected + * + * Reads from the sense register appear to be active low, i.e. a 1 indicates + * that the pin is tied to ground, a 0 indicates the pin is disconnected. + * + * Writes to the sense register appear to activate pulldowns i.e. a 1 enables + * a pulldown on a particular pin. + * + * The MacOS toolbox appears to use a series of reads and writes to first + * determine if extended sense is to be used, and then check which pins are + * tied together in order to determine the display type. + */ + +typedef struct MacFbSense { + uint8_t type; + uint8_t sense; + uint8_t ext_sense; +} MacFbSense; + +static MacFbSense macfb_sense_table[] = { + { MACFB_DISPLAY_APPLE_21_COLOR, 0x0, 0 }, + { MACFB_DISPLAY_APPLE_PORTRAIT, 0x1, 0 }, + { MACFB_DISPLAY_APPLE_12_RGB, 0x2, 0 }, + { MACFB_DISPLAY_APPLE_2PAGE_MONO, 0x3, 0 }, + { MACFB_DISPLAY_NTSC_UNDERSCAN, 0x4, 0 }, + { MACFB_DISPLAY_NTSC_OVERSCAN, 0x4, 0 }, + { MACFB_DISPLAY_APPLE_12_MONO, 0x6, 0 }, + { MACFB_DISPLAY_APPLE_13_RGB, 0x6, 0 }, + { MACFB_DISPLAY_16_COLOR, 0x7, 0x3 }, + { MACFB_DISPLAY_PAL1_UNDERSCAN, 0x7, 0x0 }, + { MACFB_DISPLAY_PAL1_OVERSCAN, 0x7, 0x0 }, + { MACFB_DISPLAY_PAL2_UNDERSCAN, 0x7, 0x6 }, + { MACFB_DISPLAY_PAL2_OVERSCAN, 0x7, 0x6 }, + { MACFB_DISPLAY_VGA, 0x7, 0x5 }, + { MACFB_DISPLAY_SVGA, 0x7, 0x5 }, +}; + +static MacFbMode macfb_mode_table[] = { + { MACFB_DISPLAY_VGA, 1, 0x100, 0x71e, 640, 480, 0x400, 0x1000 }, + { MACFB_DISPLAY_VGA, 2, 0x100, 0x70e, 640, 480, 0x400, 0x1000 }, + { MACFB_DISPLAY_VGA, 4, 0x100, 0x706, 640, 480, 0x400, 0x1000 }, + { MACFB_DISPLAY_VGA, 8, 0x100, 0x702, 640, 480, 0x400, 0x1000 }, + { MACFB_DISPLAY_VGA, 24, 0x100, 0x7ff, 640, 480, 0x1000, 0x1000 }, + { MACFB_DISPLAY_VGA, 1, 0xd0 , 0x70e, 800, 600, 0x340, 0xe00 }, + { MACFB_DISPLAY_VGA, 2, 0xd0 , 0x706, 800, 600, 0x340, 0xe00 }, + { MACFB_DISPLAY_VGA, 4, 0xd0 , 0x702, 800, 600, 0x340, 0xe00 }, + { MACFB_DISPLAY_VGA, 8, 0xd0, 0x700, 800, 600, 0x340, 0xe00 }, + { MACFB_DISPLAY_VGA, 24, 0x340, 0x100, 800, 600, 0xd00, 0xe00 }, + { MACFB_DISPLAY_APPLE_21_COLOR, 1, 0x90, 0x506, 1152, 870, 0x240, 0x80 }, + { MACFB_DISPLAY_APPLE_21_COLOR, 2, 0x90, 0x502, 1152, 870, 0x240, 0x80 }, + { MACFB_DISPLAY_APPLE_21_COLOR, 4, 0x90, 0x500, 1152, 870, 0x240, 0x80 }, + { MACFB_DISPLAY_APPLE_21_COLOR, 8, 0x120, 0x5ff, 1152, 870, 0x480, 0x80 }, +}; typedef void macfb_draw_line_func(MacfbState *s, uint8_t *d, uint32_t addr, int width); @@ -49,7 +135,9 @@ static void macfb_draw_line1(MacfbState *s, uint8_t *d, uint32_t addr, for (x = 0; x < width; x++) { int bit = x & 7; int idx = (macfb_read_byte(s, addr) >> (7 - bit)) & 1; - r = g = b = ((1 - idx) << 7); + r = s->color_palette[idx * 3]; + g = s->color_palette[idx * 3 + 1]; + b = s->color_palette[idx * 3 + 2]; addr += (bit == 7); *(uint32_t *)d = rgb_to_pixel32(r, g, b); @@ -143,10 +231,10 @@ static void macfb_draw_line24(MacfbState *s, uint8_t *d, uint32_t addr, int x; for (x = 0; x < width; x++) { - r = macfb_read_byte(s, addr); - g = macfb_read_byte(s, addr + 1); - b = macfb_read_byte(s, addr + 2); - addr += 3; + r = macfb_read_byte(s, addr + 1); + g = macfb_read_byte(s, addr + 2); + b = macfb_read_byte(s, addr + 3); + addr += 4; *(uint32_t *)d = rgb_to_pixel32(r, g, 
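Each row of macfb_mode_table above couples a display type and depth with the DAFB mode-control register values that select it, plus the line stride and framebuffer offset that the drawing code now takes from s->mode. A quick sketch of what the lookup helper added just below (macfb_find_mode()) resolves for the default 640x480x8 VGA configuration, using the MacFbMode field names as they appear in this series:

    MacFbMode *mode = macfb_find_mode(MACFB_DISPLAY_VGA, 640, 480, 8);
    /* from the table above:
     *   mode->mode_ctrl1 == 0x100, mode->mode_ctrl2 == 0x702,
     *   mode->stride     == 0x400 bytes per scanline,
     *   mode->offset     == 0x1000 into the VRAM region          */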
b); d += 4; @@ -187,7 +275,7 @@ static void macfb_draw_graphic(MacfbState *s) ram_addr_t page; uint32_t v = 0; int y, ymin; - int macfb_stride = (s->depth * s->width + 7) / 8; + int macfb_stride = s->mode->stride; macfb_draw_line_func *macfb_draw_line; switch (s->depth) { @@ -219,7 +307,7 @@ static void macfb_draw_graphic(MacfbState *s) DIRTY_MEMORY_VGA); ymin = -1; - page = 0; + page = s->mode->offset; for (y = 0; y < s->height; y++, page += macfb_stride) { if (macfb_check_dirty(s, snap, page, macfb_stride)) { uint8_t *data_display; @@ -252,6 +340,121 @@ static void macfb_invalidate_display(void *opaque) memory_region_set_dirty(&s->mem_vram, 0, MACFB_VRAM_SIZE); } +static uint32_t macfb_sense_read(MacfbState *s) +{ + MacFbSense *macfb_sense; + uint8_t sense; + + assert(s->type < ARRAY_SIZE(macfb_sense_table)); + macfb_sense = &macfb_sense_table[s->type]; + if (macfb_sense->sense == 0x7) { + /* Extended sense */ + sense = 0; + if (!(macfb_sense->ext_sense & 1)) { + /* Pins 7-4 together */ + if (~s->regs[DAFB_MODE_SENSE >> 2] & 3) { + sense = (~s->regs[DAFB_MODE_SENSE >> 2] & 7) | 3; + } + } + if (!(macfb_sense->ext_sense & 2)) { + /* Pins 10-7 together */ + if (~s->regs[DAFB_MODE_SENSE >> 2] & 6) { + sense = (~s->regs[DAFB_MODE_SENSE >> 2] & 7) | 6; + } + } + if (!(macfb_sense->ext_sense & 4)) { + /* Pins 4-10 together */ + if (~s->regs[DAFB_MODE_SENSE >> 2] & 5) { + sense = (~s->regs[DAFB_MODE_SENSE >> 2] & 7) | 5; + } + } + } else { + /* Normal sense */ + sense = (~macfb_sense->sense & 7) | + (~s->regs[DAFB_MODE_SENSE >> 2] & 7); + } + + trace_macfb_sense_read(sense); + return sense; +} + +static void macfb_sense_write(MacfbState *s, uint32_t val) +{ + s->regs[DAFB_MODE_SENSE >> 2] = val; + + trace_macfb_sense_write(val); + return; +} + +static void macfb_update_mode(MacfbState *s) +{ + s->width = s->mode->width; + s->height = s->mode->height; + s->depth = s->mode->depth; + + trace_macfb_update_mode(s->width, s->height, s->depth); + macfb_invalidate_display(s); +} + +static void macfb_mode_write(MacfbState *s) +{ + MacFbMode *macfb_mode; + int i; + + for (i = 0; i < ARRAY_SIZE(macfb_mode_table); i++) { + macfb_mode = &macfb_mode_table[i]; + + if (s->type != macfb_mode->type) { + continue; + } + + if ((s->regs[DAFB_MODE_CTRL1 >> 2] & 0xff) == + (macfb_mode->mode_ctrl1 & 0xff) && + (s->regs[DAFB_MODE_CTRL2 >> 2] & 0xff) == + (macfb_mode->mode_ctrl2 & 0xff)) { + s->mode = macfb_mode; + macfb_update_mode(s); + break; + } + } +} + +static MacFbMode *macfb_find_mode(MacfbDisplayType display_type, + uint16_t width, uint16_t height, + uint8_t depth) +{ + MacFbMode *macfb_mode; + int i; + + for (i = 0; i < ARRAY_SIZE(macfb_mode_table); i++) { + macfb_mode = &macfb_mode_table[i]; + + if (display_type == macfb_mode->type && width == macfb_mode->width && + height == macfb_mode->height && depth == macfb_mode->depth) { + return macfb_mode; + } + } + + return NULL; +} + +static gchar *macfb_mode_list(void) +{ + GString *list = g_string_new(""); + MacFbMode *macfb_mode; + int i; + + for (i = 0; i < ARRAY_SIZE(macfb_mode_table); i++) { + macfb_mode = &macfb_mode_table[i]; + + g_string_append_printf(list, " %dx%dx%d\n", macfb_mode->width, + macfb_mode->height, macfb_mode->depth); + } + + return g_string_free(list, FALSE); +} + + static void macfb_update_display(void *opaque) { MacfbState *s = opaque; @@ -271,6 +474,36 @@ static void macfb_update_display(void *opaque) macfb_draw_graphic(s); } +static void macfb_update_irq(MacfbState *s) +{ + uint32_t irq_state = s->irq_state & s->irq_mask; + + if 
(irq_state) { + qemu_irq_raise(s->irq); + } else { + qemu_irq_lower(s->irq); + } +} + +static int64_t macfb_next_vbl(void) +{ + return (qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + DAFB_INTR_VBL_PERIOD_NS) / + DAFB_INTR_VBL_PERIOD_NS * DAFB_INTR_VBL_PERIOD_NS; +} + +static void macfb_vbl_timer(void *opaque) +{ + MacfbState *s = opaque; + int64_t next_vbl; + + s->irq_state |= DAFB_INTR_VBL; + macfb_update_irq(s); + + /* 60 Hz irq */ + next_vbl = macfb_next_vbl(); + timer_mod(s->vbl_timer, next_vbl); +} + static void macfb_reset(MacfbState *s) { int i; @@ -289,7 +522,26 @@ static uint64_t macfb_ctrl_read(void *opaque, hwaddr addr, unsigned int size) { - return 0; + MacfbState *s = opaque; + uint64_t val = 0; + + switch (addr) { + case DAFB_MODE_VADDR1: + case DAFB_MODE_VADDR2: + case DAFB_MODE_CTRL1: + case DAFB_MODE_CTRL2: + val = s->regs[addr >> 2]; + break; + case DAFB_INTR_STAT: + val = s->irq_state; + break; + case DAFB_MODE_SENSE: + val = macfb_sense_read(s); + break; + } + + trace_macfb_ctrl_read(addr, val, size); + return val; } static void macfb_ctrl_write(void *opaque, @@ -298,17 +550,52 @@ static void macfb_ctrl_write(void *opaque, unsigned int size) { MacfbState *s = opaque; + int64_t next_vbl; + switch (addr) { + case DAFB_MODE_VADDR1: + case DAFB_MODE_VADDR2: + s->regs[addr >> 2] = val; + break; + case DAFB_MODE_CTRL1 ... DAFB_MODE_CTRL1 + 3: + case DAFB_MODE_CTRL2 ... DAFB_MODE_CTRL2 + 3: + s->regs[addr >> 2] = val; + if (val) { + macfb_mode_write(s); + } + break; + case DAFB_MODE_SENSE: + macfb_sense_write(s, val); + break; + case DAFB_INTR_MASK: + s->irq_mask = val; + if (val & DAFB_INTR_VBL) { + next_vbl = macfb_next_vbl(); + timer_mod(s->vbl_timer, next_vbl); + } else { + timer_del(s->vbl_timer); + } + break; + case DAFB_INTR_CLEAR: + s->irq_state &= ~DAFB_INTR_VBL; + macfb_update_irq(s); + break; case DAFB_RESET: s->palette_current = 0; + s->irq_state &= ~DAFB_INTR_VBL; + macfb_update_irq(s); break; case DAFB_LUT: - s->color_palette[s->palette_current++] = val; + s->color_palette[s->palette_current] = val; + s->palette_current = (s->palette_current + 1) % + ARRAY_SIZE(s->color_palette); if (s->palette_current % 3) { macfb_invalidate_display(s); } break; } + + trace_macfb_ctrl_write(addr, val, size); } static const MemoryRegionOps macfb_ctrl_ops = { @@ -321,7 +608,7 @@ static const MemoryRegionOps macfb_ctrl_ops = { static int macfb_post_load(void *opaque, int version_id) { - macfb_invalidate_display(opaque); + macfb_mode_write(opaque); return 0; } @@ -334,6 +621,7 @@ static const VMStateDescription vmstate_macfb = { .fields = (VMStateField[]) { VMSTATE_UINT8_ARRAY(color_palette, MacfbState, 256 * 3), VMSTATE_UINT32(palette_current, MacfbState), + VMSTATE_UINT32_ARRAY(regs, MacfbState, MACFB_NUM_REGS), VMSTATE_END_OF_LIST() } }; @@ -343,14 +631,20 @@ static const GraphicHwOps macfb_ops = { .gfx_update = macfb_update_display, }; -static void macfb_common_realize(DeviceState *dev, MacfbState *s, Error **errp) +static bool macfb_common_realize(DeviceState *dev, MacfbState *s, Error **errp) { DisplaySurface *surface; - if (s->depth != 1 && s->depth != 2 && s->depth != 4 && s->depth != 8 && - s->depth != 16 && s->depth != 24) { - error_setg(errp, "unknown guest depth %d", s->depth); - return; + s->mode = macfb_find_mode(s->type, s->width, s->height, s->depth); + if (!s->mode) { + gchar *list; + error_setg(errp, "unknown display mode: width %d, height %d, depth %d", + s->width, s->height, s->depth); + list = macfb_mode_list(); + error_append_hint(errp, "Available modes:\n%s", list); 
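The vertical blank interrupt added above runs off a QEMU_CLOCK_VIRTUAL timer with a fixed period of DAFB_INTR_VBL_PERIOD_NS = 16625800 ns (1e9 / 16625800 is roughly 60.15 Hz). macfb_next_vbl() deliberately rounds up to the next multiple of the period instead of re-arming at "now + period", so interrupt-handling latency never accumulates as drift. A worked example of that rounding:

    /* With P = 16625800 ns and a current virtual clock of 40000000 ns: */
    int64_t next = (40000000 + 16625800) / 16625800 * 16625800;
    /* = 56625800 / P * P = 3 * P = 49877400 ns, i.e. the third exact
     * period boundary after time 0, regardless of when the callback ran. */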
+ g_free(list); + + return false; } s->con = graphic_console_init(dev, 0, &macfb_ops, s); @@ -359,18 +653,21 @@ static void macfb_common_realize(DeviceState *dev, MacfbState *s, Error **errp) if (surface_bits_per_pixel(surface) != 32) { error_setg(errp, "unknown host depth %d", surface_bits_per_pixel(surface)); - return; + return false; } memory_region_init_io(&s->mem_ctrl, OBJECT(dev), &macfb_ctrl_ops, s, "macfb-ctrl", 0x1000); - memory_region_init_ram_nomigrate(&s->mem_vram, OBJECT(s), "macfb-vram", - MACFB_VRAM_SIZE, errp); + memory_region_init_ram(&s->mem_vram, OBJECT(dev), "macfb-vram", + MACFB_VRAM_SIZE, &error_abort); s->vram = memory_region_get_ram_ptr(&s->mem_vram); s->vram_bit_mask = MACFB_VRAM_SIZE - 1; - vmstate_register_ram(&s->mem_vram, dev); memory_region_set_coalescing(&s->mem_vram); + + s->vbl_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, macfb_vbl_timer, s); + macfb_update_mode(s); + return true; } static void macfb_sysbus_realize(DeviceState *dev, Error **errp) @@ -378,14 +675,23 @@ static void macfb_sysbus_realize(DeviceState *dev, Error **errp) MacfbSysBusState *s = MACFB(dev); MacfbState *ms = &s->macfb; - macfb_common_realize(dev, ms, errp); + if (!macfb_common_realize(dev, ms, errp)) { + return; + } + sysbus_init_mmio(SYS_BUS_DEVICE(s), &ms->mem_ctrl); sysbus_init_mmio(SYS_BUS_DEVICE(s), &ms->mem_vram); + + qdev_init_gpio_out(dev, &ms->irq, 1); } -const uint8_t macfb_rom[] = { - 255, 0, 0, 0, -}; +static void macfb_nubus_set_irq(void *opaque, int n, int level) +{ + MacfbNubusState *s = NUBUS_MACFB(opaque); + NubusDevice *nd = NUBUS_DEVICE(s); + + nubus_set_irq(nd, level); +} static void macfb_nubus_realize(DeviceState *dev, Error **errp) { @@ -395,12 +701,29 @@ static void macfb_nubus_realize(DeviceState *dev, Error **errp) MacfbState *ms = &s->macfb; ndc->parent_realize(dev, errp); + if (*errp) { + return; + } + + if (!macfb_common_realize(dev, ms, errp)) { + return; + } - macfb_common_realize(dev, ms, errp); memory_region_add_subregion(&nd->slot_mem, DAFB_BASE, &ms->mem_ctrl); memory_region_add_subregion(&nd->slot_mem, VIDEO_BASE, &ms->mem_vram); - nubus_register_rom(nd, macfb_rom, sizeof(macfb_rom), 1, 9, 0xf); + ms->irq = qemu_allocate_irq(macfb_nubus_set_irq, s, 0); +} + +static void macfb_nubus_unrealize(DeviceState *dev) +{ + MacfbNubusState *s = NUBUS_MACFB(dev); + MacfbNubusDeviceClass *ndc = NUBUS_MACFB_GET_CLASS(dev); + MacfbState *ms = &s->macfb; + + ndc->parent_unrealize(dev); + + qemu_free_irq(ms->irq); } static void macfb_sysbus_reset(DeviceState *d) @@ -419,6 +742,8 @@ static Property macfb_sysbus_properties[] = { DEFINE_PROP_UINT32("width", MacfbSysBusState, macfb.width, 640), DEFINE_PROP_UINT32("height", MacfbSysBusState, macfb.height, 480), DEFINE_PROP_UINT8("depth", MacfbSysBusState, macfb.depth, 8), + DEFINE_PROP_UINT8("display", MacfbSysBusState, macfb.type, + MACFB_DISPLAY_VGA), DEFINE_PROP_END_OF_LIST(), }; @@ -426,6 +751,8 @@ static Property macfb_nubus_properties[] = { DEFINE_PROP_UINT32("width", MacfbNubusState, macfb.width, 640), DEFINE_PROP_UINT32("height", MacfbNubusState, macfb.height, 480), DEFINE_PROP_UINT8("depth", MacfbNubusState, macfb.depth, 8), + DEFINE_PROP_UINT8("display", MacfbNubusState, macfb.type, + MACFB_DISPLAY_VGA), DEFINE_PROP_END_OF_LIST(), }; @@ -447,6 +774,8 @@ static void macfb_nubus_class_init(ObjectClass *klass, void *data) device_class_set_parent_realize(dc, macfb_nubus_realize, &ndc->parent_realize); + device_class_set_parent_unrealize(dc, macfb_nubus_unrealize, + &ndc->parent_unrealize); dc->desc = "Nubus Macintosh 
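Together with the width/height/depth properties, the new "display" property lets board code ask for a specific monitor type instead of a bare depth. A board-side wiring sketch (assuming the TYPE_MACFB type name and the MACFB_DISPLAY_VGA enumerator from macfb.h; this is not code from the patch):

    DeviceState *dev = qdev_new(TYPE_MACFB);

    qdev_prop_set_uint32(dev, "width", 800);
    qdev_prop_set_uint32(dev, "height", 600);
    qdev_prop_set_uint8(dev, "depth", 8);
    qdev_prop_set_uint8(dev, "display", MACFB_DISPLAY_VGA);
    sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);

If the width/height/depth triple does not match a row of the mode table, realize now fails with the "unknown display mode" error and appends the list of supported modes from macfb_mode_list().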
framebuffer"; dc->reset = macfb_nubus_reset; dc->vmsd = &vmstate_macfb; diff --git a/hw/display/trace-events b/hw/display/trace-events index f03f6655bc..3a7a2c957f 100644 --- a/hw/display/trace-events +++ b/hw/display/trace-events @@ -167,3 +167,10 @@ sm501_disp_ctrl_read(uint32_t addr, uint32_t val) "addr=0x%x, val=0x%x" sm501_disp_ctrl_write(uint32_t addr, uint32_t val) "addr=0x%x, val=0x%x" sm501_2d_engine_read(uint32_t addr, uint32_t val) "addr=0x%x, val=0x%x" sm501_2d_engine_write(uint32_t addr, uint32_t val) "addr=0x%x, val=0x%x" + +# macfb.c +macfb_ctrl_read(uint64_t addr, uint64_t value, unsigned int size) "addr 0x%"PRIx64 " value 0x%"PRIx64 " size %u" +macfb_ctrl_write(uint64_t addr, uint64_t value, unsigned int size) "addr 0x%"PRIx64 " value 0x%"PRIx64 " size %u" +macfb_sense_read(uint32_t value) "video sense: 0x%"PRIx32 +macfb_sense_write(uint32_t value) "video sense: 0x%"PRIx32 +macfb_update_mode(uint32_t width, uint32_t height, uint8_t depth) "setting mode to width %"PRId32 " height %"PRId32 " size %d" diff --git a/hw/display/virtio-gpu-udmabuf-stubs.c b/hw/display/virtio-gpu-udmabuf-stubs.c index 81f661441a..f692e13510 100644 --- a/hw/display/virtio-gpu-udmabuf-stubs.c +++ b/hw/display/virtio-gpu-udmabuf-stubs.c @@ -20,7 +20,8 @@ void virtio_gpu_fini_udmabuf(struct virtio_gpu_simple_resource *res) int virtio_gpu_update_dmabuf(VirtIOGPU *g, uint32_t scanout_id, struct virtio_gpu_simple_resource *res, - struct virtio_gpu_framebuffer *fb) + struct virtio_gpu_framebuffer *fb, + struct virtio_gpu_rect *r) { /* nothing (stub) */ return 0; diff --git a/hw/display/virtio-gpu-udmabuf.c b/hw/display/virtio-gpu-udmabuf.c index c6f7f58784..1597921c51 100644 --- a/hw/display/virtio-gpu-udmabuf.c +++ b/hw/display/virtio-gpu-udmabuf.c @@ -171,7 +171,8 @@ static VGPUDMABuf *virtio_gpu_create_dmabuf(VirtIOGPU *g, uint32_t scanout_id, struct virtio_gpu_simple_resource *res, - struct virtio_gpu_framebuffer *fb) + struct virtio_gpu_framebuffer *fb, + struct virtio_gpu_rect *r) { VGPUDMABuf *dmabuf; @@ -183,10 +184,14 @@ static VGPUDMABuf dmabuf->buf.width = fb->width; dmabuf->buf.height = fb->height; dmabuf->buf.stride = fb->stride; + dmabuf->buf.x = r->x; + dmabuf->buf.y = r->y; + dmabuf->buf.scanout_width = r->width; + dmabuf->buf.scanout_height = r->height; dmabuf->buf.fourcc = qemu_pixman_to_drm_format(fb->format); dmabuf->buf.fd = res->dmabuf_fd; dmabuf->buf.allow_fences = true; - + dmabuf->buf.draw_submitted = false; dmabuf->scanout_id = scanout_id; QTAILQ_INSERT_HEAD(&g->dmabuf.bufs, dmabuf, next); @@ -196,24 +201,25 @@ static VGPUDMABuf int virtio_gpu_update_dmabuf(VirtIOGPU *g, uint32_t scanout_id, struct virtio_gpu_simple_resource *res, - struct virtio_gpu_framebuffer *fb) + struct virtio_gpu_framebuffer *fb, + struct virtio_gpu_rect *r) { struct virtio_gpu_scanout *scanout = &g->parent_obj.scanout[scanout_id]; VGPUDMABuf *new_primary, *old_primary = NULL; - new_primary = virtio_gpu_create_dmabuf(g, scanout_id, res, fb); + new_primary = virtio_gpu_create_dmabuf(g, scanout_id, res, fb, r); if (!new_primary) { return -EINVAL; } - if (g->dmabuf.primary) { - old_primary = g->dmabuf.primary; + if (g->dmabuf.primary[scanout_id]) { + old_primary = g->dmabuf.primary[scanout_id]; } - g->dmabuf.primary = new_primary; + g->dmabuf.primary[scanout_id] = new_primary; qemu_console_resize(scanout->con, - new_primary->buf.width, - new_primary->buf.height); + new_primary->buf.scanout_width, + new_primary->buf.scanout_height); dpy_gl_scanout_dmabuf(scanout->con, &new_primary->buf); if (old_primary) { 
diff --git a/hw/display/virtio-gpu.c b/hw/display/virtio-gpu.c index 182e0868b0..d78b9700c7 100644 --- a/hw/display/virtio-gpu.c +++ b/hw/display/virtio-gpu.c @@ -517,9 +517,9 @@ static void virtio_gpu_resource_flush(VirtIOGPU *g, console_has_gl(scanout->con)) { dpy_gl_update(scanout->con, 0, 0, scanout->width, scanout->height); - return; } } + return; } if (!res->blob && @@ -627,7 +627,7 @@ static void virtio_gpu_do_set_scanout(VirtIOGPU *g, if (res->blob) { if (console_has_gl(scanout->con)) { - if (!virtio_gpu_update_dmabuf(g, scanout_id, res, fb)) { + if (!virtio_gpu_update_dmabuf(g, scanout_id, res, fb, r)) { virtio_gpu_update_scanout(g, scanout_id, res, r); return; } diff --git a/hw/dma/sifive_pdma.c b/hw/dma/sifive_pdma.c index b4fd40573a..85fe34f5f3 100644 --- a/hw/dma/sifive_pdma.c +++ b/hw/dma/sifive_pdma.c @@ -232,7 +232,7 @@ static void sifive_pdma_write(void *opaque, hwaddr offset, { SiFivePDMAState *s = opaque; int ch = SIFIVE_PDMA_CHAN_NO(offset); - bool claimed; + bool claimed, run; if (ch >= SIFIVE_PDMA_CHANS) { qemu_log_mask(LOG_GUEST_ERROR, "%s: Invalid channel no %d\n", @@ -243,7 +243,8 @@ static void sifive_pdma_write(void *opaque, hwaddr offset, offset &= 0xfff; switch (offset) { case DMA_CONTROL: - claimed = !!s->chan[ch].control & CONTROL_CLAIM; + claimed = !!(s->chan[ch].control & CONTROL_CLAIM); + run = !!(s->chan[ch].control & CONTROL_RUN); if (!claimed && (value & CONTROL_CLAIM)) { /* reset Next* registers */ @@ -254,13 +255,19 @@ static void sifive_pdma_write(void *opaque, hwaddr offset, s->chan[ch].next_src = 0; } + /* claim bit can only be cleared when run is low */ + if (run && !(value & CONTROL_CLAIM)) { + value |= CONTROL_CLAIM; + } + s->chan[ch].control = value; /* * If channel was not claimed before run bit is set, + * or if the channel is disclaimed when run was low, * DMA won't run. */ - if (!claimed) { + if (!claimed || (!run && !(value & CONTROL_CLAIM))) { s->chan[ch].control &= ~CONTROL_RUN; return; } diff --git a/hw/gpio/aspeed_gpio.c b/hw/gpio/aspeed_gpio.c index dfa6d6cb40..911d21c8cf 100644 --- a/hw/gpio/aspeed_gpio.c +++ b/hw/gpio/aspeed_gpio.c @@ -16,11 +16,7 @@ #include "hw/irq.h" #include "migration/vmstate.h" -#define GPIOS_PER_REG 32 -#define GPIOS_PER_SET GPIOS_PER_REG -#define GPIO_PIN_GAP_SIZE 4 #define GPIOS_PER_GROUP 8 -#define GPIO_GROUP_SHIFT 3 /* GPIO Source Types */ #define ASPEED_CMD_SRC_MASK 0x01010101 @@ -259,7 +255,7 @@ static void aspeed_gpio_update(AspeedGPIOState *s, GPIOSets *regs, diff = old ^ new; if (diff) { - for (gpio = 0; gpio < GPIOS_PER_REG; gpio++) { + for (gpio = 0; gpio < ASPEED_GPIOS_PER_SET; gpio++) { uint32_t mask = 1 << gpio; /* If the gpio needs to be updated... */ @@ -283,8 +279,7 @@ static void aspeed_gpio_update(AspeedGPIOState *s, GPIOSets *regs, if (direction & mask) { /* ...trigger the line-state IRQ */ ptrdiff_t set = aspeed_gpio_set_idx(s, regs); - size_t offset = set * GPIOS_PER_SET + gpio; - qemu_set_irq(s->gpios[offset], !!(new & mask)); + qemu_set_irq(s->gpios[set][gpio], !!(new & mask)); } else { /* ...otherwise if we meet the line's current IRQ policy... 
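The SiFive PDMA change above encodes two hardware rules: the claim bit cannot be cleared while run is still set, and a write that drops claim on an idle channel cannot start it in the same access. From the guest's side the implied programming order is therefore roughly the following (write32() is a placeholder for a register write; only the DMA_CONTROL/CONTROL_CLAIM/CONTROL_RUN names come from the device model):

    write32(DMA_CONTROL, CONTROL_CLAIM);                /* claim: Next* registers reset  */
    /* ... program the Next* transfer registers ... */
    write32(DMA_CONTROL, CONTROL_CLAIM | CONTROL_RUN);  /* start the transfer            */
    /* ... wait for completion ... */
    write32(DMA_CONTROL, CONTROL_CLAIM);                /* run low, channel still held   */
    write32(DMA_CONTROL, 0);                            /* only now can claim be dropped */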
*/ if (aspeed_evaluate_irq(regs, old & mask, gpio)) { @@ -297,21 +292,6 @@ static void aspeed_gpio_update(AspeedGPIOState *s, GPIOSets *regs, qemu_set_irq(s->irq, !!(s->pending)); } -static uint32_t aspeed_adjust_pin(AspeedGPIOState *s, uint32_t pin) -{ - AspeedGPIOClass *agc = ASPEED_GPIO_GET_CLASS(s); - /* - * The 2500 has a 4 pin gap in group AB and the 2400 has a 4 pin - * gap in group Y (and only four pins in AB but this is the last group so - * it doesn't matter). - */ - if (agc->gap && pin >= agc->gap) { - pin += GPIO_PIN_GAP_SIZE; - } - - return pin; -} - static bool aspeed_gpio_get_pin_level(AspeedGPIOState *s, uint32_t set_idx, uint32_t pin) { @@ -367,7 +347,7 @@ static uint32_t update_value_control_source(GPIOSets *regs, uint32_t old_value, uint32_t new_value = 0; /* for each group in set */ - for (i = 0; i < GPIOS_PER_REG; i += GPIOS_PER_GROUP) { + for (i = 0; i < ASPEED_GPIOS_PER_SET; i += GPIOS_PER_GROUP) { cmd_source = extract32(regs->cmd_source_0, i, 1) | (extract32(regs->cmd_source_1, i, 1) << 1); @@ -637,7 +617,7 @@ static void aspeed_gpio_write(void *opaque, hwaddr offset, uint64_t data, * bidirectional | 1 | 1 | data * input only | 1 | 0 | 0 * output only | 0 | 1 | 1 - * no pin / gap | 0 | 0 | 0 + * no pin | 0 | 0 | 0 * * which is captured by: * data = ( data | ~input) & output; @@ -779,7 +759,7 @@ static void aspeed_gpio_set_pin(Object *obj, Visitor *v, const char *name, } /****************** Setup functions ******************/ -static const GPIOSetProperties ast2400_set_props[] = { +static const GPIOSetProperties ast2400_set_props[ASPEED_GPIO_MAX_NR_SETS] = { [0] = {0xffffffff, 0xffffffff, {"A", "B", "C", "D"} }, [1] = {0xffffffff, 0xffffffff, {"E", "F", "G", "H"} }, [2] = {0xffffffff, 0xffffffff, {"I", "J", "K", "L"} }, @@ -789,28 +769,28 @@ static const GPIOSetProperties ast2400_set_props[] = { [6] = {0x0000000f, 0x0fffff0f, {"Y", "Z", "AA", "AB"} }, }; -static const GPIOSetProperties ast2500_set_props[] = { +static const GPIOSetProperties ast2500_set_props[ASPEED_GPIO_MAX_NR_SETS] = { [0] = {0xffffffff, 0xffffffff, {"A", "B", "C", "D"} }, [1] = {0xffffffff, 0xffffffff, {"E", "F", "G", "H"} }, [2] = {0xffffffff, 0xffffffff, {"I", "J", "K", "L"} }, [3] = {0xffffffff, 0xffffffff, {"M", "N", "O", "P"} }, [4] = {0xffffffff, 0xffffffff, {"Q", "R", "S", "T"} }, [5] = {0xffffffff, 0x0000ffff, {"U", "V", "W", "X"} }, - [6] = {0xffffff0f, 0x0fffff0f, {"Y", "Z", "AA", "AB"} }, + [6] = {0x0fffffff, 0x0fffffff, {"Y", "Z", "AA", "AB"} }, [7] = {0x000000ff, 0x000000ff, {"AC"} }, }; -static GPIOSetProperties ast2600_3_3v_set_props[] = { +static GPIOSetProperties ast2600_3_3v_set_props[ASPEED_GPIO_MAX_NR_SETS] = { [0] = {0xffffffff, 0xffffffff, {"A", "B", "C", "D"} }, [1] = {0xffffffff, 0xffffffff, {"E", "F", "G", "H"} }, [2] = {0xffffffff, 0xffffffff, {"I", "J", "K", "L"} }, [3] = {0xffffffff, 0xffffffff, {"M", "N", "O", "P"} }, - [4] = {0xffffffff, 0xffffffff, {"Q", "R", "S", "T"} }, - [5] = {0xffffffff, 0x0000ffff, {"U", "V", "W", "X"} }, - [6] = {0xffff0000, 0x0fff0000, {"Y", "Z", "", ""} }, + [4] = {0xffffffff, 0x00ffffff, {"Q", "R", "S", "T"} }, + [5] = {0xffffffff, 0xffffff00, {"U", "V", "W", "X"} }, + [6] = {0x0000ffff, 0x0000ffff, {"Y", "Z"} }, }; -static GPIOSetProperties ast2600_1_8v_set_props[] = { +static GPIOSetProperties ast2600_1_8v_set_props[ASPEED_GPIO_MAX_NR_SETS] = { [0] = {0xffffffff, 0xffffffff, {"18A", "18B", "18C", "18D"} }, [1] = {0x0000000f, 0x0000000f, {"18E"} }, }; @@ -836,14 +816,20 @@ static void aspeed_gpio_realize(DeviceState *dev, Error **errp) 
AspeedGPIOState *s = ASPEED_GPIO(dev); SysBusDevice *sbd = SYS_BUS_DEVICE(dev); AspeedGPIOClass *agc = ASPEED_GPIO_GET_CLASS(s); - int pin; /* Interrupt parent line */ sysbus_init_irq(sbd, &s->irq); /* Individual GPIOs */ - for (pin = 0; pin < agc->nr_gpio_pins; pin++) { - sysbus_init_irq(sbd, &s->gpios[pin]); + for (int i = 0; i < ASPEED_GPIO_MAX_NR_SETS; i++) { + const GPIOSetProperties *props = &agc->props[i]; + uint32_t skip = ~(props->input | props->output); + for (int j = 0; j < ASPEED_GPIOS_PER_SET; j++) { + if (skip >> j & 1) { + continue; + } + sysbus_init_irq(sbd, &s->gpios[i][j]); + } } memory_region_init_io(&s->iomem, OBJECT(s), &aspeed_gpio_ops, s, @@ -856,20 +842,22 @@ static void aspeed_gpio_init(Object *obj) { AspeedGPIOState *s = ASPEED_GPIO(obj); AspeedGPIOClass *agc = ASPEED_GPIO_GET_CLASS(s); - int pin; - for (pin = 0; pin < agc->nr_gpio_pins; pin++) { - char *name; - int set_idx = pin / GPIOS_PER_SET; - int pin_idx = aspeed_adjust_pin(s, pin) - (set_idx * GPIOS_PER_SET); - int group_idx = pin_idx >> GPIO_GROUP_SHIFT; - const GPIOSetProperties *props = &agc->props[set_idx]; - - name = g_strdup_printf("gpio%s%d", props->group_label[group_idx], - pin_idx % GPIOS_PER_GROUP); - object_property_add(obj, name, "bool", aspeed_gpio_get_pin, - aspeed_gpio_set_pin, NULL, NULL); - g_free(name); + for (int i = 0; i < ASPEED_GPIO_MAX_NR_SETS; i++) { + const GPIOSetProperties *props = &agc->props[i]; + uint32_t skip = ~(props->input | props->output); + for (int j = 0; j < ASPEED_GPIOS_PER_SET; j++) { + if (skip >> j & 1) { + continue; + } + int group_idx = j / GPIOS_PER_GROUP; + int pin_idx = j % GPIOS_PER_GROUP; + const char *group = &props->group_label[group_idx][0]; + char *name = g_strdup_printf("gpio%s%d", group, pin_idx); + object_property_add(obj, name, "bool", aspeed_gpio_get_pin, + aspeed_gpio_set_pin, NULL, NULL); + g_free(name); + } } } @@ -926,7 +914,6 @@ static void aspeed_gpio_ast2400_class_init(ObjectClass *klass, void *data) agc->props = ast2400_set_props; agc->nr_gpio_pins = 216; agc->nr_gpio_sets = 7; - agc->gap = 196; agc->reg_table = aspeed_3_3v_gpios; } @@ -937,7 +924,6 @@ static void aspeed_gpio_2500_class_init(ObjectClass *klass, void *data) agc->props = ast2500_set_props; agc->nr_gpio_pins = 228; agc->nr_gpio_sets = 8; - agc->gap = 220; agc->reg_table = aspeed_3_3v_gpios; } diff --git a/hw/gpio/bcm2835_gpio.c b/hw/gpio/bcm2835_gpio.c index abdddbc67c..c995bba1d9 100644 --- a/hw/gpio/bcm2835_gpio.c +++ b/hw/gpio/bcm2835_gpio.c @@ -299,8 +299,7 @@ static void bcm2835_gpio_init(Object *obj) DeviceState *dev = DEVICE(obj); SysBusDevice *sbd = SYS_BUS_DEVICE(obj); - qbus_create_inplace(&s->sdbus, sizeof(s->sdbus), - TYPE_SD_BUS, DEVICE(s), "sd-bus"); + qbus_init(&s->sdbus, sizeof(s->sdbus), TYPE_SD_BUS, DEVICE(s), "sd-bus"); memory_region_init_io(&s->iomem, obj, &bcm2835_gpio_ops, s, "bcm2835_gpio", 0x1000); diff --git a/hw/hyperv/vmbus.c b/hw/hyperv/vmbus.c index c9887d5a7b..dbce3b35fb 100644 --- a/hw/hyperv/vmbus.c +++ b/hw/hyperv/vmbus.c @@ -2729,7 +2729,7 @@ static void vmbus_bridge_realize(DeviceState *dev, Error **errp) return; } - bridge->bus = VMBUS(qbus_create(TYPE_VMBUS, dev, "vmbus")); + bridge->bus = VMBUS(qbus_new(TYPE_VMBUS, dev, "vmbus")); } static char *vmbus_bridge_ofw_unit_address(const SysBusDevice *dev) diff --git a/hw/i2c/aspeed_i2c.c b/hw/i2c/aspeed_i2c.c index 8d276d9ed3..03a4f5a910 100644 --- a/hw/i2c/aspeed_i2c.c +++ b/hw/i2c/aspeed_i2c.c @@ -740,20 +740,20 @@ static const VMStateDescription aspeed_i2c_vmstate = { static void 
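The Aspeed GPIO rework above retires the special-cased "pin gap" arithmetic: whether a pin exists is already encoded in each set's input/output masks, so realize and instance_init simply skip pins present in neither. Checking that against the old comment about the AST2400 (a four-pin gap in group Y, and only four pins in AB), using set 6 of ast2400_set_props:

    uint32_t skip = ~(0x0000000f | 0x0fffff0f);   /* ~(input | output) for set 6 */
    /* skip == 0xf00000f0:
     *   bits  4..7  -> Y4..Y7   never exposed (the old gap in group Y)
     *   bits 28..31 -> AB4..AB7 never exposed (group AB has only 4 pins) */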
aspeed_i2c_reset(DeviceState *dev) { - int i; AspeedI2CState *s = ASPEED_I2C(dev); - AspeedI2CClass *aic = ASPEED_I2C_GET_CLASS(s); s->intr_status = 0; +} + +static void aspeed_i2c_instance_init(Object *obj) +{ + AspeedI2CState *s = ASPEED_I2C(obj); + AspeedI2CClass *aic = ASPEED_I2C_GET_CLASS(s); + int i; for (i = 0; i < aic->num_busses; i++) { - s->busses[i].intr_ctrl = 0; - s->busses[i].intr_status = 0; - s->busses[i].cmd = 0; - s->busses[i].buf = 0; - s->busses[i].dma_addr = 0; - s->busses[i].dma_len = 0; - i2c_end_transfer(s->busses[i].bus); + object_initialize_child(obj, "bus[*]", &s->busses[i], + TYPE_ASPEED_I2C_BUS); } } @@ -791,17 +791,21 @@ static void aspeed_i2c_realize(DeviceState *dev, Error **errp) sysbus_init_mmio(sbd, &s->iomem); for (i = 0; i < aic->num_busses; i++) { - char name[32]; + Object *bus = OBJECT(&s->busses[i]); int offset = i < aic->gap ? 1 : 5; - sysbus_init_irq(sbd, &s->busses[i].irq); - snprintf(name, sizeof(name), "aspeed.i2c.%d", i); - s->busses[i].controller = s; - s->busses[i].id = i; - s->busses[i].bus = i2c_init_bus(dev, name); - memory_region_init_io(&s->busses[i].mr, OBJECT(dev), - &aspeed_i2c_bus_ops, &s->busses[i], name, - aic->reg_size); + if (!object_property_set_link(bus, "controller", OBJECT(s), errp)) { + return; + } + + if (!object_property_set_uint(bus, "bus-id", i, errp)) { + return; + } + + if (!sysbus_realize(SYS_BUS_DEVICE(bus), errp)) { + return; + } + memory_region_add_subregion(&s->iomem, aic->reg_size * (i + offset), &s->busses[i].mr); } @@ -841,12 +845,72 @@ static void aspeed_i2c_class_init(ObjectClass *klass, void *data) static const TypeInfo aspeed_i2c_info = { .name = TYPE_ASPEED_I2C, .parent = TYPE_SYS_BUS_DEVICE, + .instance_init = aspeed_i2c_instance_init, .instance_size = sizeof(AspeedI2CState), .class_init = aspeed_i2c_class_init, .class_size = sizeof(AspeedI2CClass), .abstract = true, }; +static void aspeed_i2c_bus_reset(DeviceState *dev) +{ + AspeedI2CBus *s = ASPEED_I2C_BUS(dev); + + s->intr_ctrl = 0; + s->intr_status = 0; + s->cmd = 0; + s->buf = 0; + s->dma_addr = 0; + s->dma_len = 0; + i2c_end_transfer(s->bus); +} + +static void aspeed_i2c_bus_realize(DeviceState *dev, Error **errp) +{ + AspeedI2CBus *s = ASPEED_I2C_BUS(dev); + AspeedI2CClass *aic; + g_autofree char *name = g_strdup_printf(TYPE_ASPEED_I2C_BUS ".%d", s->id); + + if (!s->controller) { + error_setg(errp, TYPE_ASPEED_I2C_BUS ": 'controller' link not set"); + return; + } + + aic = ASPEED_I2C_GET_CLASS(s->controller); + + sysbus_init_irq(SYS_BUS_DEVICE(dev), &s->irq); + + s->bus = i2c_init_bus(dev, name); + + memory_region_init_io(&s->mr, OBJECT(s), &aspeed_i2c_bus_ops, + s, name, aic->reg_size); + sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->mr); +} + +static Property aspeed_i2c_bus_properties[] = { + DEFINE_PROP_UINT8("bus-id", AspeedI2CBus, id, 0), + DEFINE_PROP_LINK("controller", AspeedI2CBus, controller, TYPE_ASPEED_I2C, + AspeedI2CState *), + DEFINE_PROP_END_OF_LIST(), +}; + +static void aspeed_i2c_bus_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->desc = "Aspeed I2C Bus"; + dc->realize = aspeed_i2c_bus_realize; + dc->reset = aspeed_i2c_bus_reset; + device_class_set_props(dc, aspeed_i2c_bus_properties); +} + +static const TypeInfo aspeed_i2c_bus_info = { + .name = TYPE_ASPEED_I2C_BUS, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(AspeedI2CBus), + .class_init = aspeed_i2c_bus_class_init, +}; + static qemu_irq aspeed_2400_i2c_bus_get_irq(AspeedI2CBus *bus) { return bus->controller->irq; @@ -951,6 
+1015,7 @@ static const TypeInfo aspeed_2600_i2c_info = { static void aspeed_i2c_register_types(void) { + type_register_static(&aspeed_i2c_bus_info); type_register_static(&aspeed_i2c_info); type_register_static(&aspeed_2400_i2c_info); type_register_static(&aspeed_2500_i2c_info); diff --git a/hw/i2c/core.c b/hw/i2c/core.c index 416372ad00..0e7d2763b9 100644 --- a/hw/i2c/core.c +++ b/hw/i2c/core.c @@ -60,7 +60,7 @@ I2CBus *i2c_init_bus(DeviceState *parent, const char *name) { I2CBus *bus; - bus = I2C_BUS(qbus_create(TYPE_I2C_BUS, parent, name)); + bus = I2C_BUS(qbus_new(TYPE_I2C_BUS, parent, name)); QLIST_INIT(&bus->current_devs); vmstate_register(NULL, VMSTATE_INSTANCE_ID_ANY, &vmstate_i2c_bus, bus); return bus; diff --git a/hw/i386/Kconfig b/hw/i386/Kconfig index ddedcef0b2..d22ac4a4b9 100644 --- a/hw/i386/Kconfig +++ b/hw/i386/Kconfig @@ -6,6 +6,10 @@ config SEV select X86_FW_OVMF depends on KVM +config SGX + bool + depends on KVM + config PC bool imply APPLESMC @@ -21,6 +25,7 @@ config PC imply PVPANIC_ISA imply QXL imply SEV + imply SGX imply SGA imply TEST_DEVICES imply TPM_CRB @@ -54,6 +59,7 @@ config PC_ACPI select ACPI_X86 select ACPI_CPU_HOTPLUG select ACPI_MEMORY_HOTPLUG + select ACPI_VIOT select SMBUS_EEPROM select PFLASH_CFI01 depends on ACPI_SMBUS diff --git a/hw/i386/acpi-build.c b/hw/i386/acpi-build.c index dfaa47cdc2..a3ad6abd33 100644 --- a/hw/i386/acpi-build.c +++ b/hw/i386/acpi-build.c @@ -68,9 +68,11 @@ #include "qom/qom-qobject.h" #include "hw/i386/amd_iommu.h" #include "hw/i386/intel_iommu.h" +#include "hw/virtio/virtio-iommu.h" #include "hw/acpi/ipmi.h" #include "hw/acpi/hmat.h" +#include "hw/acpi/viot.h" /* These are used to size the ACPI tables for -M pc-i440fx-1.7 and * -M pc-i440fx-2.0. Even if the actual amount of AML generated grows @@ -345,13 +347,23 @@ static void acpi_align_size(GArray *blob, unsigned align) g_array_set_size(blob, ROUND_UP(acpi_data_len(blob), align)); } -/* FACS */ +/* + * ACPI spec 1.0b, + * 5.2.6 Firmware ACPI Control Structure + */ static void build_facs(GArray *table_data) { - AcpiFacsDescriptorRev1 *facs = acpi_data_push(table_data, sizeof *facs); - memcpy(&facs->signature, "FACS", 4); - facs->length = cpu_to_le32(sizeof(*facs)); + const char *sig = "FACS"; + const uint8_t reserved[40] = {}; + + g_array_append_vals(table_data, sig, 4); /* Signature */ + build_append_int_noprefix(table_data, 64, 4); /* Length */ + build_append_int_noprefix(table_data, 0, 4); /* Hardware Signature */ + build_append_int_noprefix(table_data, 0, 4); /* Firmware Waking Vector */ + build_append_int_noprefix(table_data, 0, 4); /* Global Lock */ + build_append_int_noprefix(table_data, 0, 4); /* Flags */ + g_array_append_vals(table_data, reserved, 40); /* Reserved */ } static void build_append_pcihp_notify_entry(Aml *method, int slot) @@ -1405,12 +1417,12 @@ build_dsdt(GArray *table_data, BIOSLinker *linker, #endif int i; VMBusBridge *vmbus_bridge = vmbus_bridge_find(); + AcpiTable table = { .sig = "DSDT", .rev = 1, .oem_id = x86ms->oem_id, + .oem_table_id = x86ms->oem_table_id }; + acpi_table_begin(&table, table_data); dsdt = init_aml_allocator(); - /* Reserve space for header */ - acpi_data_push(dsdt->buf, sizeof(AcpiTableHeader)); - build_dbg_aml(dsdt); if (misc->is_piix4) { sb_scope = aml_scope("_SB"); @@ -1841,74 +1853,113 @@ build_dsdt(GArray *table_data, BIOSLinker *linker, } #endif + if (pcms->sgx_epc.size != 0) { + uint64_t epc_base = pcms->sgx_epc.base; + uint64_t epc_size = pcms->sgx_epc.size; + + dev = aml_device("EPC"); + aml_append(dev, 
aml_name_decl("_HID", aml_eisaid("INT0E0C"))); + aml_append(dev, aml_name_decl("_STR", + aml_unicode("Enclave Page Cache 1.0"))); + crs = aml_resource_template(); + aml_append(crs, + aml_qword_memory(AML_POS_DECODE, AML_MIN_FIXED, + AML_MAX_FIXED, AML_NON_CACHEABLE, + AML_READ_WRITE, 0, epc_base, + epc_base + epc_size - 1, 0, epc_size)); + aml_append(dev, aml_name_decl("_CRS", crs)); + + method = aml_method("_STA", 0, AML_NOTSERIALIZED); + aml_append(method, aml_return(aml_int(0x0f))); + aml_append(dev, method); + + aml_append(sb_scope, dev); + } aml_append(dsdt, sb_scope); /* copy AML table into ACPI tables blob and patch header there */ g_array_append_vals(table_data, dsdt->buf->data, dsdt->buf->len); - build_header(linker, table_data, - (void *)(table_data->data + table_data->len - dsdt->buf->len), - "DSDT", dsdt->buf->len, 1, x86ms->oem_id, x86ms->oem_table_id); + acpi_table_end(linker, &table); free_aml_allocator(); } +/* + * IA-PC HPET (High Precision Event Timers) Specification (Revision: 1.0a) + * 3.2.4The ACPI 2.0 HPET Description Table (HPET) + */ static void build_hpet(GArray *table_data, BIOSLinker *linker, const char *oem_id, const char *oem_table_id) { - Acpi20Hpet *hpet; - int hpet_start = table_data->len; + AcpiTable table = { .sig = "HPET", .rev = 1, + .oem_id = oem_id, .oem_table_id = oem_table_id }; - hpet = acpi_data_push(table_data, sizeof(*hpet)); + acpi_table_begin(&table, table_data); /* Note timer_block_id value must be kept in sync with value advertised by * emulated hpet */ - hpet->timer_block_id = cpu_to_le32(0x8086a201); - hpet->addr.address = cpu_to_le64(HPET_BASE); - build_header(linker, table_data, - (void *)(table_data->data + hpet_start), - "HPET", sizeof(*hpet), 1, oem_id, oem_table_id); + /* Event Timer Block ID */ + build_append_int_noprefix(table_data, 0x8086a201, 4); + /* BASE_ADDRESS */ + build_append_gas(table_data, AML_AS_SYSTEM_MEMORY, 0, 0, 0, HPET_BASE); + /* HPET Number */ + build_append_int_noprefix(table_data, 0, 1); + /* Main Counter Minimum Clock_tick in Periodic Mode */ + build_append_int_noprefix(table_data, 0, 2); + /* Page Protection And OEM Attribute */ + build_append_int_noprefix(table_data, 0, 1); + acpi_table_end(linker, &table); } #ifdef CONFIG_TPM +/* + * TCPA Description Table + * + * Following Level 00, Rev 00.37 of specs: + * http://www.trustedcomputinggroup.org/resources/tcg_acpi_specification + * 7.1.2 ACPI Table Layout + */ static void build_tpm_tcpa(GArray *table_data, BIOSLinker *linker, GArray *tcpalog, const char *oem_id, const char *oem_table_id) { - int tcpa_start = table_data->len; - Acpi20Tcpa *tcpa = acpi_data_push(table_data, sizeof *tcpa); - unsigned log_addr_size = sizeof(tcpa->log_area_start_address); - unsigned log_addr_offset = - (char *)&tcpa->log_area_start_address - table_data->data; + unsigned log_addr_offset; + AcpiTable table = { .sig = "TCPA", .rev = 2, + .oem_id = oem_id, .oem_table_id = oem_table_id }; - tcpa->platform_class = cpu_to_le16(TPM_TCPA_ACPI_CLASS_CLIENT); - tcpa->log_area_minimum_length = cpu_to_le32(TPM_LOG_AREA_MINIMUM_SIZE); - acpi_data_push(tcpalog, le32_to_cpu(tcpa->log_area_minimum_length)); + acpi_table_begin(&table, table_data); + /* Platform Class */ + build_append_int_noprefix(table_data, TPM_TCPA_ACPI_CLASS_CLIENT, 2); + /* Log Area Minimum Length (LAML) */ + build_append_int_noprefix(table_data, TPM_LOG_AREA_MINIMUM_SIZE, 4); + /* Log Area Start Address (LASA) */ + log_addr_offset = table_data->len; + build_append_int_noprefix(table_data, 0, 8); + /* allocate/reserve space for 
TPM log area */ + acpi_data_push(tcpalog, TPM_LOG_AREA_MINIMUM_SIZE); bios_linker_loader_alloc(linker, ACPI_BUILD_TPMLOG_FILE, tcpalog, 1, false /* high memory */); - /* log area start address to be filled by Guest linker */ - bios_linker_loader_add_pointer(linker, - ACPI_BUILD_TABLE_FILE, log_addr_offset, log_addr_size, - ACPI_BUILD_TPMLOG_FILE, 0); + bios_linker_loader_add_pointer(linker, ACPI_BUILD_TABLE_FILE, + log_addr_offset, 8, ACPI_BUILD_TPMLOG_FILE, 0); - build_header(linker, table_data, - (void *)(table_data->data + tcpa_start), - "TCPA", sizeof(*tcpa), 2, oem_id, oem_table_id); + acpi_table_end(linker, &table); } #endif #define HOLE_640K_START (640 * KiB) #define HOLE_640K_END (1 * MiB) +/* + * ACPI spec, Revision 3.0 + * 5.2.15 System Resource Affinity Table (SRAT) + */ static void build_srat(GArray *table_data, BIOSLinker *linker, MachineState *machine) { - AcpiSystemResourceAffinityTable *srat; - AcpiSratMemoryAffinity *numamem; - int i; - int srat_start, numa_start, slots; + int numa_mem_start, slots; uint64_t mem_len, mem_base, next_base; MachineClass *mc = MACHINE_GET_CLASS(machine); X86MachineState *x86ms = X86_MACHINE(machine); @@ -1919,45 +1970,53 @@ build_srat(GArray *table_data, BIOSLinker *linker, MachineState *machine) ram_addr_t hotpluggable_address_space_size = object_property_get_int(OBJECT(pcms), PC_MACHINE_DEVMEM_REGION_SIZE, NULL); + AcpiTable table = { .sig = "SRAT", .rev = 1, .oem_id = x86ms->oem_id, + .oem_table_id = x86ms->oem_table_id }; - srat_start = table_data->len; - - srat = acpi_data_push(table_data, sizeof *srat); - srat->reserved1 = cpu_to_le32(1); + acpi_table_begin(&table, table_data); + build_append_int_noprefix(table_data, 1, 4); /* Reserved */ + build_append_int_noprefix(table_data, 0, 8); /* Reserved */ for (i = 0; i < apic_ids->len; i++) { int node_id = apic_ids->cpus[i].props.node_id; uint32_t apic_id = apic_ids->cpus[i].arch_id; if (apic_id < 255) { - AcpiSratProcessorAffinity *core; - - core = acpi_data_push(table_data, sizeof *core); - core->type = ACPI_SRAT_PROCESSOR_APIC; - core->length = sizeof(*core); - core->local_apic_id = apic_id; - core->proximity_lo = node_id; - memset(core->proximity_hi, 0, 3); - core->local_sapic_eid = 0; - core->flags = cpu_to_le32(1); + /* 5.2.15.1 Processor Local APIC/SAPIC Affinity Structure */ + build_append_int_noprefix(table_data, 0, 1); /* Type */ + build_append_int_noprefix(table_data, 16, 1); /* Length */ + /* Proximity Domain [7:0] */ + build_append_int_noprefix(table_data, node_id, 1); + build_append_int_noprefix(table_data, apic_id, 1); /* APIC ID */ + /* Flags, Table 5-36 */ + build_append_int_noprefix(table_data, 1, 4); + build_append_int_noprefix(table_data, 0, 1); /* Local SAPIC EID */ + /* Proximity Domain [31:8] */ + build_append_int_noprefix(table_data, 0, 3); + build_append_int_noprefix(table_data, 0, 4); /* Reserved */ } else { - AcpiSratProcessorX2ApicAffinity *core; - - core = acpi_data_push(table_data, sizeof *core); - core->type = ACPI_SRAT_PROCESSOR_x2APIC; - core->length = sizeof(*core); - core->x2apic_id = cpu_to_le32(apic_id); - core->proximity_domain = cpu_to_le32(node_id); - core->flags = cpu_to_le32(1); + /* + * ACPI spec, Revision 4.0 + * 5.2.16.3 Processor Local x2APIC Affinity Structure + */ + build_append_int_noprefix(table_data, 2, 1); /* Type */ + build_append_int_noprefix(table_data, 24, 1); /* Length */ + build_append_int_noprefix(table_data, 0, 2); /* Reserved */ + /* Proximity Domain */ + build_append_int_noprefix(table_data, node_id, 4); + 
build_append_int_noprefix(table_data, apic_id, 4); /* X2APIC ID */ + /* Flags, Table 5-39 */ + build_append_int_noprefix(table_data, 1 /* Enabled */, 4); + build_append_int_noprefix(table_data, 0, 4); /* Clock Domain */ + build_append_int_noprefix(table_data, 0, 4); /* Reserved */ } } - /* the memory map is a bit tricky, it contains at least one hole * from 640k-1M and possibly another one from 3.5G-4G. */ next_base = 0; - numa_start = table_data->len; + numa_mem_start = table_data->len; for (i = 1; i < nb_numa_nodes + 1; ++i) { mem_base = next_base; @@ -1969,8 +2028,7 @@ build_srat(GArray *table_data, BIOSLinker *linker, MachineState *machine) next_base > HOLE_640K_START) { mem_len -= next_base - HOLE_640K_START; if (mem_len > 0) { - numamem = acpi_data_push(table_data, sizeof *numamem); - build_srat_memory(numamem, mem_base, mem_len, i - 1, + build_srat_memory(table_data, mem_base, mem_len, i - 1, MEM_AFFINITY_ENABLED); } @@ -1988,8 +2046,7 @@ build_srat(GArray *table_data, BIOSLinker *linker, MachineState *machine) next_base > x86ms->below_4g_mem_size) { mem_len -= next_base - x86ms->below_4g_mem_size; if (mem_len > 0) { - numamem = acpi_data_push(table_data, sizeof *numamem); - build_srat_memory(numamem, mem_base, mem_len, i - 1, + build_srat_memory(table_data, mem_base, mem_len, i - 1, MEM_AFFINITY_ENABLED); } mem_base = 1ULL << 32; @@ -1998,8 +2055,7 @@ build_srat(GArray *table_data, BIOSLinker *linker, MachineState *machine) } if (mem_len > 0) { - numamem = acpi_data_push(table_data, sizeof *numamem); - build_srat_memory(numamem, mem_base, mem_len, i - 1, + build_srat_memory(table_data, mem_base, mem_len, i - 1, MEM_AFFINITY_ENABLED); } } @@ -2008,10 +2064,15 @@ build_srat(GArray *table_data, BIOSLinker *linker, MachineState *machine) nvdimm_build_srat(table_data); } - slots = (table_data->len - numa_start) / sizeof *numamem; + /* + * TODO: this part is not in ACPI spec and current linux kernel boots fine + * without these entries. But I recall there were issues the last time I + * tried to remove it with some ancient guest OS, however I can't remember + * what that was so keep this around for now + */ + slots = (table_data->len - numa_mem_start) / 40 /* mem affinity len */; for (; slots < nb_numa_nodes + 2; slots++) { - numamem = acpi_data_push(table_data, sizeof *numamem); - build_srat_memory(numamem, 0, 0, 0, MEM_AFFINITY_NOFLAGS); + build_srat_memory(table_data, 0, 0, 0, MEM_AFFINITY_NOFLAGS); } /* @@ -2023,17 +2084,12 @@ build_srat(GArray *table_data, BIOSLinker *linker, MachineState *machine) * providing _PXM method if necessary. 
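The loop above describes each NUMA node's RAM as one Memory Affinity structure per contiguous piece, splitting around the 640K-1M legacy hole and the PCI hole below 4G. For instance, a single node with 3 GiB of RAM below 4G comes out as two entries, equivalent to this sketch (using the build_srat_memory() helper exactly as it is called above):

    build_srat_memory(table_data, 0,       640 * KiB,         0, MEM_AFFINITY_ENABLED);
    build_srat_memory(table_data, 1 * MiB, 3 * GiB - 1 * MiB, 0, MEM_AFFINITY_ENABLED);
    /* the 640K-1M hole itself is simply left undescribed */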
*/ if (hotpluggable_address_space_size) { - numamem = acpi_data_push(table_data, sizeof *numamem); - build_srat_memory(numamem, machine->device_memory->base, + build_srat_memory(table_data, machine->device_memory->base, hotpluggable_address_space_size, nb_numa_nodes - 1, MEM_AFFINITY_HOTPLUGGABLE | MEM_AFFINITY_ENABLED); } - build_header(linker, table_data, - (void *)(table_data->data + srat_start), - "SRAT", - table_data->len - srat_start, 1, x86ms->oem_id, - x86ms->oem_table_id); + acpi_table_end(linker, &table); } /* @@ -2042,8 +2098,9 @@ build_srat(GArray *table_data, BIOSLinker *linker, MachineState *machine) static void insert_scope(PCIBus *bus, PCIDevice *dev, void *opaque) { + const size_t device_scope_size = 6 /* device scope structure */ + + 2 /* 1 path entry */; GArray *scope_blob = opaque; - AcpiDmarDeviceScope *scope = NULL; if (object_dynamic_cast(OBJECT(dev), TYPE_PCI_BRIDGE)) { /* Dmar Scope Type: 0x02 for PCI Bridge */ @@ -2054,8 +2111,7 @@ insert_scope(PCIBus *bus, PCIDevice *dev, void *opaque) } /* length */ - build_append_int_noprefix(scope_blob, - sizeof(*scope) + sizeof(scope->path[0]), 1); + build_append_int_noprefix(scope_blob, device_scope_size, 1); /* reserved */ build_append_int_noprefix(scope_blob, 0, 2); /* enumeration_id */ @@ -2078,8 +2134,7 @@ dmar_host_bridges(Object *obj, void *opaque) PCIBus *bus = PCI_HOST_BRIDGE(obj)->bus; if (bus && !pci_bus_bypass_iommu(bus)) { - pci_for_each_device(bus, pci_bus_num(bus), insert_scope, - scope_blob); + pci_for_each_device_under_bus(bus, insert_scope, scope_blob); } } @@ -2087,26 +2142,26 @@ dmar_host_bridges(Object *obj, void *opaque) } /* - * VT-d spec 8.1 DMA Remapping Reporting Structure - * (version Oct. 2014 or later) + * Intel ® Virtualization Technology for Directed I/O + * Architecture Specification. Revision 3.3 + * 8.1 DMA Remapping Reporting Structure */ static void build_dmar_q35(GArray *table_data, BIOSLinker *linker, const char *oem_id, const char *oem_table_id) { - int dmar_start = table_data->len; - - AcpiTableDmar *dmar; - AcpiDmarHardwareUnit *drhd; - AcpiDmarRootPortATS *atsr; uint8_t dmar_flags = 0; + uint8_t rsvd10[10] = {}; + /* Root complex IOAPIC uses one path only */ + const size_t ioapic_scope_size = 6 /* device scope structure */ + + 2 /* 1 path entry */; X86IOMMUState *iommu = x86_iommu_get_default(); - AcpiDmarDeviceScope *scope = NULL; - /* Root complex IOAPIC use one path[0] only */ - size_t ioapic_scope_size = sizeof(*scope) + sizeof(scope->path[0]); IntelIOMMUState *intel_iommu = INTEL_IOMMU_DEVICE(iommu); GArray *scope_blob = g_array_new(false, true, 1); + AcpiTable table = { .sig = "DMAR", .rev = 1, .oem_id = oem_id, + .oem_table_id = oem_table_id }; + /* * A PCI bus walk, for each PCI host bridge. 
* Insert scope for each PCI bridge and endpoint device which @@ -2120,43 +2175,52 @@ build_dmar_q35(GArray *table_data, BIOSLinker *linker, const char *oem_id, dmar_flags |= 0x1; /* Flags: 0x1: INT_REMAP */ } - dmar = acpi_data_push(table_data, sizeof(*dmar)); - dmar->host_address_width = intel_iommu->aw_bits - 1; - dmar->flags = dmar_flags; + acpi_table_begin(&table, table_data); + /* Host Address Width */ + build_append_int_noprefix(table_data, intel_iommu->aw_bits - 1, 1); + build_append_int_noprefix(table_data, dmar_flags, 1); /* Flags */ + g_array_append_vals(table_data, rsvd10, sizeof(rsvd10)); /* Reserved */ - /* DMAR Remapping Hardware Unit Definition structure */ - drhd = acpi_data_push(table_data, sizeof(*drhd) + ioapic_scope_size); - drhd->type = cpu_to_le16(ACPI_DMAR_TYPE_HARDWARE_UNIT); - drhd->length = - cpu_to_le16(sizeof(*drhd) + ioapic_scope_size + scope_blob->len); - drhd->flags = 0; /* Don't include all pci device */ - drhd->pci_segment = cpu_to_le16(0); - drhd->address = cpu_to_le64(Q35_HOST_BRIDGE_IOMMU_ADDR); + /* 8.3 DMAR Remapping Hardware Unit Definition structure */ + build_append_int_noprefix(table_data, 0, 2); /* Type */ + /* Length */ + build_append_int_noprefix(table_data, + 16 + ioapic_scope_size + scope_blob->len, 2); + /* Flags */ + build_append_int_noprefix(table_data, 0 /* Don't include all pci device */ , + 1); + build_append_int_noprefix(table_data, 0 , 1); /* Reserved */ + build_append_int_noprefix(table_data, 0 , 2); /* Segment Number */ + /* Register Base Address */ + build_append_int_noprefix(table_data, Q35_HOST_BRIDGE_IOMMU_ADDR , 8); /* Scope definition for the root-complex IOAPIC. See VT-d spec * 8.3.1 (version Oct. 2014 or later). */ - scope = &drhd->scope[0]; - scope->entry_type = 0x03; /* Type: 0x03 for IOAPIC */ - scope->length = ioapic_scope_size; - scope->enumeration_id = ACPI_BUILD_IOAPIC_ID; - scope->bus = Q35_PSEUDO_BUS_PLATFORM; - scope->path[0].device = PCI_SLOT(Q35_PSEUDO_DEVFN_IOAPIC); - scope->path[0].function = PCI_FUNC(Q35_PSEUDO_DEVFN_IOAPIC); + build_append_int_noprefix(table_data, 0x03 /* IOAPIC */, 1); /* Type */ + build_append_int_noprefix(table_data, ioapic_scope_size, 1); /* Length */ + build_append_int_noprefix(table_data, 0, 2); /* Reserved */ + /* Enumeration ID */ + build_append_int_noprefix(table_data, ACPI_BUILD_IOAPIC_ID, 1); + /* Start Bus Number */ + build_append_int_noprefix(table_data, Q35_PSEUDO_BUS_PLATFORM, 1); + /* Path, {Device, Function} pair */ + build_append_int_noprefix(table_data, PCI_SLOT(Q35_PSEUDO_DEVFN_IOAPIC), 1); + build_append_int_noprefix(table_data, PCI_FUNC(Q35_PSEUDO_DEVFN_IOAPIC), 1); /* Add scope found above */ g_array_append_vals(table_data, scope_blob->data, scope_blob->len); g_array_free(scope_blob, true); if (iommu->dt_supported) { - atsr = acpi_data_push(table_data, sizeof(*atsr)); - atsr->type = cpu_to_le16(ACPI_DMAR_TYPE_ATSR); - atsr->length = cpu_to_le16(sizeof(*atsr)); - atsr->flags = ACPI_DMAR_ATSR_ALL_PORTS; - atsr->pci_segment = cpu_to_le16(0); + /* 8.5 Root Port ATS Capability Reporting Structure */ + build_append_int_noprefix(table_data, 2, 2); /* Type */ + build_append_int_noprefix(table_data, 8, 2); /* Length */ + build_append_int_noprefix(table_data, 1 /* ALL_PORTS */, 1); /* Flags */ + build_append_int_noprefix(table_data, 0, 1); /* Reserved */ + build_append_int_noprefix(table_data, 0, 2); /* Segment Number */ } - build_header(linker, table_data, (void *)(table_data->data + dmar_start), - "DMAR", table_data->len - dmar_start, 1, oem_id, oem_table_id); + 
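Both insert_scope() above and the root-complex IOAPIC scope emitted here use the same 8-byte Device Scope layout: a 6-byte fixed part followed by one 2-byte {device, function} path entry. For a hypothetical PCI endpoint at 00:03.0 the appended bytes would be:

    build_append_int_noprefix(scope_blob, 0x01, 1);  /* Type: PCI Endpoint Device  */
    build_append_int_noprefix(scope_blob, 8, 1);     /* Length: 6 + one path entry */
    build_append_int_noprefix(scope_blob, 0, 2);     /* Reserved                   */
    build_append_int_noprefix(scope_blob, 0, 1);     /* Enumeration ID             */
    build_append_int_noprefix(scope_blob, 0, 1);     /* Start Bus Number: 0        */
    build_append_int_noprefix(scope_blob, 3, 1);     /* Path: device 3             */
    build_append_int_noprefix(scope_blob, 0, 1);     /* Path: function 0           */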
acpi_table_end(linker, &table); } /* @@ -2170,10 +2234,10 @@ static void build_waet(GArray *table_data, BIOSLinker *linker, const char *oem_id, const char *oem_table_id) { - int waet_start = table_data->len; + AcpiTable table = { .sig = "WAET", .rev = 1, .oem_id = oem_id, + .oem_table_id = oem_table_id }; - /* WAET header */ - acpi_data_push(table_data, sizeof(AcpiTableHeader)); + acpi_table_begin(&table, table_data); /* * Set "ACPI PM timer good" flag. * @@ -2182,9 +2246,7 @@ build_waet(GArray *table_data, BIOSLinker *linker, const char *oem_id, * Which avoids costly VMExits caused by guest re-reading it unnecessarily. */ build_append_int_noprefix(table_data, 1 << 1 /* ACPI PM timer good */, 4); - - build_header(linker, table_data, (void *)(table_data->data + waet_start), - "WAET", table_data->len - waet_start, 1, oem_id, oem_table_id); + acpi_table_end(linker, &table); } /* @@ -2278,7 +2340,7 @@ ivrs_host_bridges(Object *obj, void *opaque) PCIBus *bus = PCI_HOST_BRIDGE(obj)->bus; if (bus && !pci_bus_bypass_iommu(bus)) { - pci_for_each_device(bus, pci_bus_num(bus), insert_ivhd, ivhd_blob); + pci_for_each_device_under_bus(bus, insert_ivhd, ivhd_blob); } } @@ -2290,12 +2352,12 @@ build_amd_iommu(GArray *table_data, BIOSLinker *linker, const char *oem_id, const char *oem_table_id) { int ivhd_table_len = 24; - int iommu_start = table_data->len; AMDVIState *s = AMD_IOMMU_DEVICE(x86_iommu_get_default()); GArray *ivhd_blob = g_array_new(false, true, 1); + AcpiTable table = { .sig = "IVRS", .rev = 1, .oem_id = oem_id, + .oem_table_id = oem_table_id }; - /* IVRS header */ - acpi_data_push(table_data, sizeof(AcpiTableHeader)); + acpi_table_begin(&table, table_data); /* IVinfo - IO virtualization information common to all * IOMMU units in a system */ @@ -2380,10 +2442,7 @@ build_amd_iommu(GArray *table_data, BIOSLinker *linker, const char *oem_id, 0x48, /* special device */ 8); } - - build_header(linker, table_data, (void *)(table_data->data + iommu_start), - "IVRS", table_data->len - iommu_start, 1, oem_id, - oem_table_id); + acpi_table_end(linker, &table); } typedef @@ -2430,6 +2489,7 @@ void acpi_build(AcpiBuildTables *tables, MachineState *machine) PCMachineState *pcms = PC_MACHINE(machine); PCMachineClass *pcmc = PC_MACHINE_GET_CLASS(pcms); X86MachineState *x86ms = X86_MACHINE(machine); + DeviceState *iommu = pcms->iommu; GArray *table_offsets; unsigned facs, dsdt, rsdt, fadt; AcpiPmInfo pm; @@ -2546,17 +2606,20 @@ void acpi_build(AcpiBuildTables *tables, MachineState *machine) build_mcfg(tables_blob, tables->linker, &mcfg, x86ms->oem_id, x86ms->oem_table_id); } - if (x86_iommu_get_default()) { - IommuType IOMMUType = x86_iommu_get_type(); - if (IOMMUType == TYPE_AMD) { - acpi_add_table(table_offsets, tables_blob); - build_amd_iommu(tables_blob, tables->linker, x86ms->oem_id, - x86ms->oem_table_id); - } else if (IOMMUType == TYPE_INTEL) { - acpi_add_table(table_offsets, tables_blob); - build_dmar_q35(tables_blob, tables->linker, x86ms->oem_id, - x86ms->oem_table_id); - } + if (object_dynamic_cast(OBJECT(iommu), TYPE_AMD_IOMMU_DEVICE)) { + acpi_add_table(table_offsets, tables_blob); + build_amd_iommu(tables_blob, tables->linker, x86ms->oem_id, + x86ms->oem_table_id); + } else if (object_dynamic_cast(OBJECT(iommu), TYPE_INTEL_IOMMU_DEVICE)) { + acpi_add_table(table_offsets, tables_blob); + build_dmar_q35(tables_blob, tables->linker, x86ms->oem_id, + x86ms->oem_table_id); + } else if (object_dynamic_cast(OBJECT(iommu), TYPE_VIRTIO_IOMMU_PCI)) { + PCIDevice *pdev = PCI_DEVICE(iommu); + + 
acpi_add_table(table_offsets, tables_blob); + build_viot(machine, tables_blob, tables->linker, pci_get_bdf(pdev), + x86ms->oem_id, x86ms->oem_table_id); } if (machine->nvdimms_state->is_enabled) { nvdimm_build_acpi(table_offsets, tables_blob, tables->linker, diff --git a/hw/i386/acpi-common.c b/hw/i386/acpi-common.c index 1f5947fcf9..4aaafbdd7b 100644 --- a/hw/i386/acpi-common.c +++ b/hw/i386/acpi-common.c @@ -34,9 +34,13 @@ #include "acpi-common.h" void pc_madt_cpu_entry(AcpiDeviceIf *adev, int uid, - const CPUArchIdList *apic_ids, GArray *entry) + const CPUArchIdList *apic_ids, GArray *entry, + bool force_enabled) { uint32_t apic_id = apic_ids->cpus[uid].arch_id; + /* Flags – Local APIC Flags */ + uint32_t flags = apic_ids->cpus[uid].cpu != NULL || force_enabled ? + 1 /* Enabled */ : 0; /* ACPI spec says that LAPIC entry for non present * CPU may be omitted from MADT or it must be marked @@ -45,82 +49,84 @@ void pc_madt_cpu_entry(AcpiDeviceIf *adev, int uid, * should be put in MADT but kept disabled. */ if (apic_id < 255) { - AcpiMadtProcessorApic *apic = acpi_data_push(entry, sizeof *apic); - - apic->type = ACPI_APIC_PROCESSOR; - apic->length = sizeof(*apic); - apic->processor_id = uid; - apic->local_apic_id = apic_id; - if (apic_ids->cpus[uid].cpu != NULL) { - apic->flags = cpu_to_le32(1); - } else { - apic->flags = cpu_to_le32(0); - } + /* Rev 1.0b, Table 5-13 Processor Local APIC Structure */ + build_append_int_noprefix(entry, 0, 1); /* Type */ + build_append_int_noprefix(entry, 8, 1); /* Length */ + build_append_int_noprefix(entry, uid, 1); /* ACPI Processor ID */ + build_append_int_noprefix(entry, apic_id, 1); /* APIC ID */ + build_append_int_noprefix(entry, flags, 4); /* Flags */ } else { - AcpiMadtProcessorX2Apic *apic = acpi_data_push(entry, sizeof *apic); - - apic->type = ACPI_APIC_LOCAL_X2APIC; - apic->length = sizeof(*apic); - apic->uid = cpu_to_le32(uid); - apic->x2apic_id = cpu_to_le32(apic_id); - if (apic_ids->cpus[uid].cpu != NULL) { - apic->flags = cpu_to_le32(1); - } else { - apic->flags = cpu_to_le32(0); - } + /* Rev 4.0, 5.2.12.12 Processor Local x2APIC Structure */ + build_append_int_noprefix(entry, 9, 1); /* Type */ + build_append_int_noprefix(entry, 16, 1); /* Length */ + build_append_int_noprefix(entry, 0, 2); /* Reserved */ + build_append_int_noprefix(entry, apic_id, 4); /* X2APIC ID */ + build_append_int_noprefix(entry, flags, 4); /* Flags */ + build_append_int_noprefix(entry, uid, 4); /* ACPI Processor UID */ } } +static void build_ioapic(GArray *entry, uint8_t id, uint32_t addr, uint32_t irq) +{ + /* Rev 1.0b, 5.2.8.2 IO APIC */ + build_append_int_noprefix(entry, 1, 1); /* Type */ + build_append_int_noprefix(entry, 12, 1); /* Length */ + build_append_int_noprefix(entry, id, 1); /* IO APIC ID */ + build_append_int_noprefix(entry, 0, 1); /* Reserved */ + build_append_int_noprefix(entry, addr, 4); /* IO APIC Address */ + build_append_int_noprefix(entry, irq, 4); /* System Vector Base */ +} + +static void +build_xrupt_override(GArray *entry, uint8_t src, uint32_t gsi, uint16_t flags) +{ + /* Rev 1.0b, 5.2.8.3.1 Interrupt Source Overrides */ + build_append_int_noprefix(entry, 2, 1); /* Type */ + build_append_int_noprefix(entry, 10, 1); /* Length */ + build_append_int_noprefix(entry, 0, 1); /* Bus */ + build_append_int_noprefix(entry, src, 1); /* Source */ + /* Global System Interrupt Vector */ + build_append_int_noprefix(entry, gsi, 4); + build_append_int_noprefix(entry, flags, 2); /* Flags */ +} + +/* + * ACPI spec, Revision 1.0b + * 5.2.8 Multiple APIC 
Description Table + */ void acpi_build_madt(GArray *table_data, BIOSLinker *linker, X86MachineState *x86ms, AcpiDeviceIf *adev, const char *oem_id, const char *oem_table_id) { + int i; + bool x2apic_mode = false; MachineClass *mc = MACHINE_GET_CLASS(x86ms); const CPUArchIdList *apic_ids = mc->possible_cpu_arch_ids(MACHINE(x86ms)); - int madt_start = table_data->len; AcpiDeviceIfClass *adevc = ACPI_DEVICE_IF_GET_CLASS(adev); - bool x2apic_mode = false; + AcpiTable table = { .sig = "APIC", .rev = 1, .oem_id = oem_id, + .oem_table_id = oem_table_id }; - AcpiMultipleApicTable *madt; - AcpiMadtIoApic *io_apic; - AcpiMadtIntsrcovr *intsrcovr; - int i; - - madt = acpi_data_push(table_data, sizeof *madt); - madt->local_apic_address = cpu_to_le32(APIC_DEFAULT_ADDRESS); - madt->flags = cpu_to_le32(1); + acpi_table_begin(&table, table_data); + /* Local APIC Address */ + build_append_int_noprefix(table_data, APIC_DEFAULT_ADDRESS, 4); + build_append_int_noprefix(table_data, 1 /* PCAT_COMPAT */, 4); /* Flags */ for (i = 0; i < apic_ids->len; i++) { - adevc->madt_cpu(adev, i, apic_ids, table_data); + adevc->madt_cpu(adev, i, apic_ids, table_data, false); if (apic_ids->cpus[i].arch_id > 254) { x2apic_mode = true; } } - io_apic = acpi_data_push(table_data, sizeof *io_apic); - io_apic->type = ACPI_APIC_IO; - io_apic->length = sizeof(*io_apic); - io_apic->io_apic_id = ACPI_BUILD_IOAPIC_ID; - io_apic->address = cpu_to_le32(IO_APIC_DEFAULT_ADDRESS); - io_apic->interrupt = cpu_to_le32(0); - + build_ioapic(table_data, ACPI_BUILD_IOAPIC_ID, IO_APIC_DEFAULT_ADDRESS, 0); if (x86ms->ioapic2) { - AcpiMadtIoApic *io_apic2; - io_apic2 = acpi_data_push(table_data, sizeof *io_apic); - io_apic2->type = ACPI_APIC_IO; - io_apic2->length = sizeof(*io_apic); - io_apic2->io_apic_id = ACPI_BUILD_IOAPIC_ID + 1; - io_apic2->address = cpu_to_le32(IO_APIC_SECONDARY_ADDRESS); - io_apic2->interrupt = cpu_to_le32(IO_APIC_SECONDARY_IRQBASE); + build_ioapic(table_data, ACPI_BUILD_IOAPIC_ID + 1, + IO_APIC_SECONDARY_ADDRESS, IO_APIC_SECONDARY_IRQBASE); } if (x86ms->apic_xrupt_override) { - intsrcovr = acpi_data_push(table_data, sizeof *intsrcovr); - intsrcovr->type = ACPI_APIC_XRUPT_OVERRIDE; - intsrcovr->length = sizeof(*intsrcovr); - intsrcovr->source = 0; - intsrcovr->gsi = cpu_to_le32(2); - intsrcovr->flags = cpu_to_le16(0); /* conforms to bus specifications */ + build_xrupt_override(table_data, 0, 2, + 0 /* Flags: Conforms to the specifications of the bus */); } for (i = 1; i < 16; i++) { @@ -128,36 +134,32 @@ void acpi_build_madt(GArray *table_data, BIOSLinker *linker, /* No need for a INT source override structure. 
*/ continue; } - intsrcovr = acpi_data_push(table_data, sizeof *intsrcovr); - intsrcovr->type = ACPI_APIC_XRUPT_OVERRIDE; - intsrcovr->length = sizeof(*intsrcovr); - intsrcovr->source = i; - intsrcovr->gsi = cpu_to_le32(i); - intsrcovr->flags = cpu_to_le16(0xd); /* active high, level triggered */ + build_xrupt_override(table_data, i, i, + 0xd /* Flags: Active high, Level Triggered */); } if (x2apic_mode) { - AcpiMadtLocalX2ApicNmi *local_nmi; - - local_nmi = acpi_data_push(table_data, sizeof *local_nmi); - local_nmi->type = ACPI_APIC_LOCAL_X2APIC_NMI; - local_nmi->length = sizeof(*local_nmi); - local_nmi->uid = 0xFFFFFFFF; /* all processors */ - local_nmi->flags = cpu_to_le16(0); - local_nmi->lint = 1; /* ACPI_LINT1 */ + /* Rev 4.0, 5.2.12.13 Local x2APIC NMI Structure*/ + build_append_int_noprefix(table_data, 0xA, 1); /* Type */ + build_append_int_noprefix(table_data, 12, 1); /* Length */ + build_append_int_noprefix(table_data, 0, 2); /* Flags */ + /* ACPI Processor UID */ + build_append_int_noprefix(table_data, 0xFFFFFFFF /* all processors */, + 4); + /* Local x2APIC LINT# */ + build_append_int_noprefix(table_data, 1 /* ACPI_LINT1 */, 1); + build_append_int_noprefix(table_data, 0, 3); /* Reserved */ } else { - AcpiMadtLocalNmi *local_nmi; - - local_nmi = acpi_data_push(table_data, sizeof *local_nmi); - local_nmi->type = ACPI_APIC_LOCAL_NMI; - local_nmi->length = sizeof(*local_nmi); - local_nmi->processor_id = 0xff; /* all processors */ - local_nmi->flags = cpu_to_le16(0); - local_nmi->lint = 1; /* ACPI_LINT1 */ + /* Rev 1.0b, 5.2.8.3.3 Local APIC NMI */ + build_append_int_noprefix(table_data, 4, 1); /* Type */ + build_append_int_noprefix(table_data, 6, 1); /* Length */ + /* ACPI Processor ID */ + build_append_int_noprefix(table_data, 0xFF /* all processors */, 1); + build_append_int_noprefix(table_data, 0, 2); /* Flags */ + /* Local APIC INTI# */ + build_append_int_noprefix(table_data, 1 /* ACPI_LINT1 */, 1); } - build_header(linker, table_data, - (void *)(table_data->data + madt_start), "APIC", - table_data->len - madt_start, 1, oem_id, oem_table_id); + acpi_table_end(linker, &table); } diff --git a/hw/i386/acpi-microvm.c b/hw/i386/acpi-microvm.c index 1a0f77b911..196d318499 100644 --- a/hw/i386/acpi-microvm.c +++ b/hw/i386/acpi-microvm.c @@ -113,16 +113,16 @@ build_dsdt_microvm(GArray *table_data, BIOSLinker *linker, Aml *dsdt, *sb_scope, *scope, *pkg; bool ambiguous; Object *isabus; + AcpiTable table = { .sig = "DSDT", .rev = 2, .oem_id = x86ms->oem_id, + .oem_table_id = x86ms->oem_table_id }; isabus = object_resolve_path_type("", TYPE_ISA_BUS, &ambiguous); assert(isabus); assert(!ambiguous); + acpi_table_begin(&table, table_data); dsdt = init_aml_allocator(); - /* Reserve space for header */ - acpi_data_push(dsdt->buf, sizeof(AcpiTableHeader)); - sb_scope = aml_scope("_SB"); fw_cfg_add_acpi_dsdt(sb_scope, x86ms->fw_cfg); isa_build_aml(ISA_BUS(isabus), sb_scope); @@ -144,11 +144,10 @@ build_dsdt_microvm(GArray *table_data, BIOSLinker *linker, aml_append(scope, aml_name_decl("_S5", pkg)); aml_append(dsdt, scope); - /* copy AML table into ACPI tables blob and patch header there */ + /* copy AML bytecode into ACPI tables blob */ g_array_append_vals(table_data, dsdt->buf->data, dsdt->buf->len); - build_header(linker, table_data, - (void *)(table_data->data + table_data->len - dsdt->buf->len), - "DSDT", dsdt->buf->len, 2, x86ms->oem_id, x86ms->oem_table_id); + + acpi_table_end(linker, &table); free_aml_allocator(); } diff --git a/hw/i386/amd_iommu.c b/hw/i386/amd_iommu.c index 
2801dff97c..91fe34ae58 100644 --- a/hw/i386/amd_iommu.c +++ b/hw/i386/amd_iommu.c @@ -1526,7 +1526,7 @@ static void amdvi_init(AMDVIState *s) AMDVI_MAX_PH_ADDR | AMDVI_MAX_GVA_ADDR | AMDVI_MAX_VA_ADDR); } -static void amdvi_reset(DeviceState *dev) +static void amdvi_sysbus_reset(DeviceState *dev) { AMDVIState *s = AMD_IOMMU_DEVICE(dev); @@ -1534,11 +1534,10 @@ static void amdvi_reset(DeviceState *dev) amdvi_init(s); } -static void amdvi_realize(DeviceState *dev, Error **errp) +static void amdvi_sysbus_realize(DeviceState *dev, Error **errp) { int ret = 0; AMDVIState *s = AMD_IOMMU_DEVICE(dev); - X86IOMMUState *x86_iommu = X86_IOMMU_DEVICE(dev); MachineState *ms = MACHINE(qdev_get_machine()); PCMachineState *pcms = PC_MACHINE(ms); X86MachineState *x86ms = X86_MACHINE(ms); @@ -1548,7 +1547,6 @@ static void amdvi_realize(DeviceState *dev, Error **errp) amdvi_uint64_equal, g_free, g_free); /* This device should take care of IOMMU PCI properties */ - x86_iommu->type = TYPE_AMD; if (!qdev_realize(DEVICE(&s->pci), &bus->qbus, errp)) { return; } @@ -1585,27 +1583,27 @@ static void amdvi_realize(DeviceState *dev, Error **errp) amdvi_init(s); } -static const VMStateDescription vmstate_amdvi = { +static const VMStateDescription vmstate_amdvi_sysbus = { .name = "amd-iommu", .unmigratable = 1 }; -static void amdvi_instance_init(Object *klass) +static void amdvi_sysbus_instance_init(Object *klass) { AMDVIState *s = AMD_IOMMU_DEVICE(klass); object_initialize(&s->pci, sizeof(s->pci), TYPE_AMD_IOMMU_PCI); } -static void amdvi_class_init(ObjectClass *klass, void* data) +static void amdvi_sysbus_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); X86IOMMUClass *dc_class = X86_IOMMU_DEVICE_CLASS(klass); - dc->reset = amdvi_reset; - dc->vmsd = &vmstate_amdvi; + dc->reset = amdvi_sysbus_reset; + dc->vmsd = &vmstate_amdvi_sysbus; dc->hotpluggable = false; - dc_class->realize = amdvi_realize; + dc_class->realize = amdvi_sysbus_realize; dc_class->int_remap = amdvi_int_remap; /* Supported by the pc-q35-* machine types */ dc->user_creatable = true; @@ -1613,18 +1611,27 @@ static void amdvi_class_init(ObjectClass *klass, void* data) dc->desc = "AMD IOMMU (AMD-Vi) DMA Remapping device"; } -static const TypeInfo amdvi = { +static const TypeInfo amdvi_sysbus = { .name = TYPE_AMD_IOMMU_DEVICE, .parent = TYPE_X86_IOMMU_DEVICE, .instance_size = sizeof(AMDVIState), - .instance_init = amdvi_instance_init, - .class_init = amdvi_class_init + .instance_init = amdvi_sysbus_instance_init, + .class_init = amdvi_sysbus_class_init }; -static const TypeInfo amdviPCI = { +static void amdvi_pci_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + set_bit(DEVICE_CATEGORY_MISC, dc->categories); + dc->desc = "AMD IOMMU (AMD-Vi) DMA Remapping device"; +} + +static const TypeInfo amdvi_pci = { .name = TYPE_AMD_IOMMU_PCI, .parent = TYPE_PCI_DEVICE, .instance_size = sizeof(AMDVIPCIState), + .class_init = amdvi_pci_class_init, .interfaces = (InterfaceInfo[]) { { INTERFACE_CONVENTIONAL_PCI_DEVICE }, { }, @@ -1645,11 +1652,11 @@ static const TypeInfo amdvi_iommu_memory_region_info = { .class_init = amdvi_iommu_memory_region_class_init, }; -static void amdviPCI_register_types(void) +static void amdvi_register_types(void) { - type_register_static(&amdviPCI); - type_register_static(&amdvi); + type_register_static(&amdvi_pci); + type_register_static(&amdvi_sysbus); type_register_static(&amdvi_iommu_memory_region_info); } -type_init(amdviPCI_register_types); 
+type_init(amdvi_register_types); diff --git a/hw/i386/fw_cfg.c b/hw/i386/fw_cfg.c index 4e68d5dea4..a283785a8d 100644 --- a/hw/i386/fw_cfg.c +++ b/hw/i386/fw_cfg.c @@ -159,7 +159,7 @@ void fw_cfg_build_feature_control(MachineState *ms, FWCfgState *fw_cfg) { X86CPU *cpu = X86_CPU(ms->possible_cpus->cpus[0].cpu); CPUX86State *env = &cpu->env; - uint32_t unused, ecx, edx; + uint32_t unused, ebx, ecx, edx; uint64_t feature_control_bits = 0; uint64_t *val; @@ -174,6 +174,16 @@ void fw_cfg_build_feature_control(MachineState *ms, FWCfgState *fw_cfg) feature_control_bits |= FEATURE_CONTROL_LMCE; } + if (env->cpuid_level >= 7) { + cpu_x86_cpuid(env, 0x7, 0, &unused, &ebx, &ecx, &unused); + if (ebx & CPUID_7_0_EBX_SGX) { + feature_control_bits |= FEATURE_CONTROL_SGX; + } + if (ecx & CPUID_7_0_ECX_SGX_LC) { + feature_control_bits |= FEATURE_CONTROL_SGX_LC; + } + } + if (!feature_control_bits) { return; } diff --git a/hw/i386/intel_iommu.c b/hw/i386/intel_iommu.c index 75f075547f..294499ee20 100644 --- a/hw/i386/intel_iommu.c +++ b/hw/i386/intel_iommu.c @@ -1105,7 +1105,7 @@ static int vtd_page_walk_one(IOMMUTLBEvent *event, vtd_page_walk_info *info) .translated_addr = entry->translated_addr, .perm = entry->perm, }; - DMAMap *mapped = iova_tree_find(as->iova_tree, &target); + const DMAMap *mapped = iova_tree_find(as->iova_tree, &target); if (event->type == IOMMU_NOTIFIER_UNMAP && !info->notify_unmap) { trace_vtd_page_walk_one_skip_unmap(entry->iova, entry->addr_mask); @@ -3806,9 +3806,6 @@ static void vtd_realize(DeviceState *dev, Error **errp) X86MachineState *x86ms = X86_MACHINE(ms); PCIBus *bus = pcms->bus; IntelIOMMUState *s = INTEL_IOMMU_DEVICE(dev); - X86IOMMUState *x86_iommu = X86_IOMMU_DEVICE(dev); - - x86_iommu->type = TYPE_INTEL; if (!vtd_decide_config(s, errp)) { return; diff --git a/hw/i386/kvm/i8254.c b/hw/i386/kvm/i8254.c index fa68669e8a..191a26fa57 100644 --- a/hw/i386/kvm/i8254.c +++ b/hw/i386/kvm/i8254.c @@ -59,11 +59,6 @@ struct KVMPITClass { DeviceRealize parent_realize; }; -static int64_t abs64(int64_t v) -{ - return v < 0 ? 
-v : v; -} - static void kvm_pit_update_clock_offset(KVMPITState *s) { int64_t offset, clock_offset; @@ -81,7 +76,7 @@ static void kvm_pit_update_clock_offset(KVMPITState *s) clock_gettime(CLOCK_MONOTONIC, &ts); offset -= ts.tv_nsec; offset -= (int64_t)ts.tv_sec * 1000000000; - if (abs64(offset) < abs64(clock_offset)) { + if (uabs64(offset) < uabs64(clock_offset)) { clock_offset = offset; } } diff --git a/hw/i386/meson.build b/hw/i386/meson.build index 80dad29f2b..213e2e82b3 100644 --- a/hw/i386/meson.build +++ b/hw/i386/meson.build @@ -11,11 +11,13 @@ i386_ss.add(when: 'CONFIG_X86_IOMMU', if_true: files('x86-iommu.c'), if_false: files('x86-iommu-stub.c')) i386_ss.add(when: 'CONFIG_AMD_IOMMU', if_true: files('amd_iommu.c')) i386_ss.add(when: 'CONFIG_I440FX', if_true: files('pc_piix.c')) -i386_ss.add(when: 'CONFIG_MICROVM', if_true: files('microvm.c', 'acpi-microvm.c')) +i386_ss.add(when: 'CONFIG_MICROVM', if_true: files('microvm.c', 'acpi-microvm.c', 'microvm-dt.c')) i386_ss.add(when: 'CONFIG_Q35', if_true: files('pc_q35.c')) i386_ss.add(when: 'CONFIG_VMMOUSE', if_true: files('vmmouse.c')) i386_ss.add(when: 'CONFIG_VMPORT', if_true: files('vmport.c')) i386_ss.add(when: 'CONFIG_VTD', if_true: files('intel_iommu.c')) +i386_ss.add(when: 'CONFIG_SGX', if_true: files('sgx-epc.c','sgx.c'), + if_false: files('sgx-stub.c')) i386_ss.add(when: 'CONFIG_ACPI', if_true: files('acpi-common.c')) i386_ss.add(when: 'CONFIG_ACPI_HW_REDUCED', if_true: files('generic_event_device_x86.c')) diff --git a/hw/i386/microvm-dt.c b/hw/i386/microvm-dt.c new file mode 100644 index 0000000000..875ba91963 --- /dev/null +++ b/hw/i386/microvm-dt.c @@ -0,0 +1,341 @@ +/* + * microvm device tree support + * + * This generates an device tree for microvm and exports it via fw_cfg + * as "etc/fdt" to the firmware (edk2 specifically). + * + * The use case is to allow edk2 find the pcie ecam and the virtio + * devices, without adding an ACPI parser, reusing the fdt parser + * which is needed anyway for the arm platform. + * + * Note 1: The device tree is incomplete. CPUs and memory is missing + * for example, those can be detected using other fw_cfg files. + * Also pci ecam irq routing is not there, edk2 doesn't use + * interrupts. + * + * Note 2: This is for firmware only. OSes should use the more + * complete ACPI tables for hardware discovery. + * + * ---------------------------------------------------------------------- + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . 
+ */ +#include "qemu/osdep.h" +#include "qemu/cutils.h" +#include "sysemu/device_tree.h" +#include "hw/char/serial.h" +#include "hw/i386/fw_cfg.h" +#include "hw/rtc/mc146818rtc.h" +#include "hw/sysbus.h" +#include "hw/virtio/virtio-mmio.h" +#include "hw/usb/xhci.h" + +#include "microvm-dt.h" + +static bool debug; + +static void dt_add_microvm_irq(MicrovmMachineState *mms, + const char *nodename, uint32_t irq) +{ + int index = 0; + + if (irq >= IO_APIC_SECONDARY_IRQBASE) { + irq -= IO_APIC_SECONDARY_IRQBASE; + index++; + } + + qemu_fdt_setprop_cell(mms->fdt, nodename, "interrupt-parent", + mms->ioapic_phandle[index]); + qemu_fdt_setprop_cells(mms->fdt, nodename, "interrupts", irq, 0); +} + +static void dt_add_virtio(MicrovmMachineState *mms, VirtIOMMIOProxy *mmio) +{ + SysBusDevice *dev = SYS_BUS_DEVICE(mmio); + VirtioBusState *mmio_virtio_bus = &mmio->bus; + BusState *mmio_bus = &mmio_virtio_bus->parent_obj; + char *nodename; + + if (QTAILQ_EMPTY(&mmio_bus->children)) { + return; + } + + hwaddr base = dev->mmio[0].addr; + hwaddr size = 512; + unsigned index = (base - VIRTIO_MMIO_BASE) / size; + uint32_t irq = mms->virtio_irq_base + index; + + nodename = g_strdup_printf("/virtio_mmio@%" PRIx64, base); + qemu_fdt_add_subnode(mms->fdt, nodename); + qemu_fdt_setprop_string(mms->fdt, nodename, "compatible", "virtio,mmio"); + qemu_fdt_setprop_sized_cells(mms->fdt, nodename, "reg", 2, base, 2, size); + qemu_fdt_setprop(mms->fdt, nodename, "dma-coherent", NULL, 0); + dt_add_microvm_irq(mms, nodename, irq); + g_free(nodename); +} + +static void dt_add_xhci(MicrovmMachineState *mms) +{ + const char compat[] = "generic-xhci"; + uint32_t irq = MICROVM_XHCI_IRQ; + hwaddr base = MICROVM_XHCI_BASE; + hwaddr size = XHCI_LEN_REGS; + char *nodename; + + nodename = g_strdup_printf("/usb@%" PRIx64, base); + qemu_fdt_add_subnode(mms->fdt, nodename); + qemu_fdt_setprop(mms->fdt, nodename, "compatible", compat, sizeof(compat)); + qemu_fdt_setprop_sized_cells(mms->fdt, nodename, "reg", 2, base, 2, size); + qemu_fdt_setprop(mms->fdt, nodename, "dma-coherent", NULL, 0); + dt_add_microvm_irq(mms, nodename, irq); + g_free(nodename); +} + +static void dt_add_pcie(MicrovmMachineState *mms) +{ + hwaddr base = PCIE_MMIO_BASE; + int nr_pcie_buses; + char *nodename; + + nodename = g_strdup_printf("/pcie@%" PRIx64, base); + qemu_fdt_add_subnode(mms->fdt, nodename); + qemu_fdt_setprop_string(mms->fdt, nodename, + "compatible", "pci-host-ecam-generic"); + qemu_fdt_setprop_string(mms->fdt, nodename, "device_type", "pci"); + qemu_fdt_setprop_cell(mms->fdt, nodename, "#address-cells", 3); + qemu_fdt_setprop_cell(mms->fdt, nodename, "#size-cells", 2); + qemu_fdt_setprop_cell(mms->fdt, nodename, "linux,pci-domain", 0); + qemu_fdt_setprop(mms->fdt, nodename, "dma-coherent", NULL, 0); + + qemu_fdt_setprop_sized_cells(mms->fdt, nodename, "reg", + 2, PCIE_ECAM_BASE, 2, PCIE_ECAM_SIZE); + if (mms->gpex.mmio64.size) { + qemu_fdt_setprop_sized_cells(mms->fdt, nodename, "ranges", + + 1, FDT_PCI_RANGE_MMIO, + 2, mms->gpex.mmio32.base, + 2, mms->gpex.mmio32.base, + 2, mms->gpex.mmio32.size, + + 1, FDT_PCI_RANGE_MMIO_64BIT, + 2, mms->gpex.mmio64.base, + 2, mms->gpex.mmio64.base, + 2, mms->gpex.mmio64.size); + } else { + qemu_fdt_setprop_sized_cells(mms->fdt, nodename, "ranges", + + 1, FDT_PCI_RANGE_MMIO, + 2, mms->gpex.mmio32.base, + 2, mms->gpex.mmio32.base, + 2, mms->gpex.mmio32.size); + } + + nr_pcie_buses = PCIE_ECAM_SIZE / PCIE_MMCFG_SIZE_MIN; + qemu_fdt_setprop_cells(mms->fdt, nodename, "bus-range", 0, + nr_pcie_buses - 1); +} + 
+static void dt_add_ioapic(MicrovmMachineState *mms, SysBusDevice *dev) +{ + hwaddr base = dev->mmio[0].addr; + char *nodename; + uint32_t ph; + int index; + + switch (base) { + case IO_APIC_DEFAULT_ADDRESS: + index = 0; + break; + case IO_APIC_SECONDARY_ADDRESS: + index = 1; + break; + default: + fprintf(stderr, "unknown ioapic @ %" PRIx64 "\n", base); + return; + } + + nodename = g_strdup_printf("/ioapic%d@%" PRIx64, index + 1, base); + qemu_fdt_add_subnode(mms->fdt, nodename); + qemu_fdt_setprop_string(mms->fdt, nodename, + "compatible", "intel,ce4100-ioapic"); + qemu_fdt_setprop(mms->fdt, nodename, "interrupt-controller", NULL, 0); + qemu_fdt_setprop_cell(mms->fdt, nodename, "#interrupt-cells", 0x2); + qemu_fdt_setprop_cell(mms->fdt, nodename, "#address-cells", 0x2); + qemu_fdt_setprop_sized_cells(mms->fdt, nodename, "reg", + 2, base, 2, 0x1000); + + ph = qemu_fdt_alloc_phandle(mms->fdt); + qemu_fdt_setprop_cell(mms->fdt, nodename, "phandle", ph); + qemu_fdt_setprop_cell(mms->fdt, nodename, "linux,phandle", ph); + mms->ioapic_phandle[index] = ph; + + g_free(nodename); +} + +static void dt_add_isa_serial(MicrovmMachineState *mms, ISADevice *dev) +{ + const char compat[] = "ns16550"; + uint32_t irq = object_property_get_int(OBJECT(dev), "irq", NULL); + hwaddr base = object_property_get_int(OBJECT(dev), "iobase", NULL); + hwaddr size = 8; + char *nodename; + + nodename = g_strdup_printf("/serial@%" PRIx64, base); + qemu_fdt_add_subnode(mms->fdt, nodename); + qemu_fdt_setprop(mms->fdt, nodename, "compatible", compat, sizeof(compat)); + qemu_fdt_setprop_sized_cells(mms->fdt, nodename, "reg", 2, base, 2, size); + dt_add_microvm_irq(mms, nodename, irq); + + if (base == 0x3f8 /* com1 */) { + qemu_fdt_setprop_string(mms->fdt, "/chosen", "stdout-path", nodename); + } + + g_free(nodename); +} + +static void dt_add_isa_rtc(MicrovmMachineState *mms, ISADevice *dev) +{ + const char compat[] = "motorola,mc146818"; + uint32_t irq = RTC_ISA_IRQ; + hwaddr base = RTC_ISA_BASE; + hwaddr size = 8; + char *nodename; + + nodename = g_strdup_printf("/rtc@%" PRIx64, base); + qemu_fdt_add_subnode(mms->fdt, nodename); + qemu_fdt_setprop(mms->fdt, nodename, "compatible", compat, sizeof(compat)); + qemu_fdt_setprop_sized_cells(mms->fdt, nodename, "reg", 2, base, 2, size); + dt_add_microvm_irq(mms, nodename, irq); + g_free(nodename); +} + +static void dt_setup_isa_bus(MicrovmMachineState *mms, DeviceState *bridge) +{ + BusState *bus = qdev_get_child_bus(bridge, "isa.0"); + BusChild *kid; + Object *obj; + + QTAILQ_FOREACH(kid, &bus->children, sibling) { + DeviceState *dev = kid->child; + + /* serial */ + obj = object_dynamic_cast(OBJECT(dev), TYPE_ISA_SERIAL); + if (obj) { + dt_add_isa_serial(mms, ISA_DEVICE(obj)); + continue; + } + + /* rtc */ + obj = object_dynamic_cast(OBJECT(dev), TYPE_MC146818_RTC); + if (obj) { + dt_add_isa_rtc(mms, ISA_DEVICE(obj)); + continue; + } + + if (debug) { + fprintf(stderr, "%s: unhandled: %s\n", __func__, + object_get_typename(OBJECT(dev))); + } + } +} + +static void dt_setup_sys_bus(MicrovmMachineState *mms) +{ + BusState *bus; + BusChild *kid; + Object *obj; + + /* sysbus devices */ + bus = sysbus_get_default(); + QTAILQ_FOREACH(kid, &bus->children, sibling) { + DeviceState *dev = kid->child; + + /* ioapic */ + obj = object_dynamic_cast(OBJECT(dev), TYPE_IOAPIC); + if (obj) { + dt_add_ioapic(mms, SYS_BUS_DEVICE(obj)); + continue; + } + } + + QTAILQ_FOREACH(kid, &bus->children, sibling) { + DeviceState *dev = kid->child; + + /* virtio */ + obj = object_dynamic_cast(OBJECT(dev), 
TYPE_VIRTIO_MMIO); + if (obj) { + dt_add_virtio(mms, VIRTIO_MMIO(obj)); + continue; + } + + /* xhci */ + obj = object_dynamic_cast(OBJECT(dev), TYPE_XHCI_SYSBUS); + if (obj) { + dt_add_xhci(mms); + continue; + } + + /* pcie */ + obj = object_dynamic_cast(OBJECT(dev), TYPE_GPEX_HOST); + if (obj) { + dt_add_pcie(mms); + continue; + } + + /* isa */ + obj = object_dynamic_cast(OBJECT(dev), "isabus-bridge"); + if (obj) { + dt_setup_isa_bus(mms, DEVICE(obj)); + continue; + } + + if (debug) { + obj = object_dynamic_cast(OBJECT(dev), TYPE_IOAPIC); + if (obj) { + /* ioapic already added in first pass */ + continue; + } + fprintf(stderr, "%s: unhandled: %s\n", __func__, + object_get_typename(OBJECT(dev))); + } + } +} + +void dt_setup_microvm(MicrovmMachineState *mms) +{ + X86MachineState *x86ms = X86_MACHINE(mms); + int size = 0; + + mms->fdt = create_device_tree(&size); + + /* root node */ + qemu_fdt_setprop_string(mms->fdt, "/", "compatible", "linux,microvm"); + qemu_fdt_setprop_cell(mms->fdt, "/", "#address-cells", 0x2); + qemu_fdt_setprop_cell(mms->fdt, "/", "#size-cells", 0x2); + + qemu_fdt_add_subnode(mms->fdt, "/chosen"); + dt_setup_sys_bus(mms); + + /* add to fw_cfg */ + fprintf(stderr, "%s: add etc/fdt to fw_cfg\n", __func__); + fw_cfg_add_file(x86ms->fw_cfg, "etc/fdt", mms->fdt, size); + + if (debug) { + fprintf(stderr, "%s: writing microvm.fdt\n", __func__); + g_file_set_contents("microvm.fdt", mms->fdt, size, NULL); + int ret = system("dtc -I dtb -O dts microvm.fdt"); + if (ret != 0) { + fprintf(stderr, "%s: oops, dtc not installed?\n", __func__); + } + } +} diff --git a/hw/i386/microvm-dt.h b/hw/i386/microvm-dt.h new file mode 100644 index 0000000000..77c79cbdd9 --- /dev/null +++ b/hw/i386/microvm-dt.h @@ -0,0 +1,8 @@ +#ifndef HW_I386_MICROVM_DT_H +#define HW_I386_MICROVM_DT_H + +#include "hw/i386/microvm.h" + +void dt_setup_microvm(MicrovmMachineState *mms); + +#endif diff --git a/hw/i386/microvm.c b/hw/i386/microvm.c index f257ec5a0b..4b3b1dd262 100644 --- a/hw/i386/microvm.c +++ b/hw/i386/microvm.c @@ -28,6 +28,7 @@ #include "sysemu/reset.h" #include "sysemu/runstate.h" #include "acpi-microvm.h" +#include "microvm-dt.h" #include "hw/loader.h" #include "hw/irq.h" @@ -331,7 +332,7 @@ static void microvm_memory_init(MicrovmMachineState *mms) rom_set_fw(fw_cfg); if (machine->kernel_filename != NULL) { - x86_load_linux(x86ms, fw_cfg, 0, true, true); + x86_load_linux(x86ms, fw_cfg, 0, true); } if (mms->option_roms) { @@ -626,6 +627,7 @@ static void microvm_machine_done(Notifier *notifier, void *data) machine_done); acpi_setup_microvm(mms); + dt_setup_microvm(mms); } static void microvm_powerdown_req(Notifier *notifier, void *data) @@ -667,6 +669,7 @@ static void microvm_machine_initfn(Object *obj) static void microvm_class_init(ObjectClass *oc, void *data) { + X86MachineClass *x86mc = X86_MACHINE_CLASS(oc); MachineClass *mc = MACHINE_CLASS(oc); HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(oc); @@ -697,6 +700,8 @@ static void microvm_class_init(ObjectClass *oc, void *data) hc->unplug_request = microvm_device_unplug_request_cb; hc->unplug = microvm_device_unplug_cb; + x86mc->fwcfg_dma_enabled = true; + object_class_property_add(oc, MICROVM_MACHINE_PIC, "OnOffAuto", microvm_machine_get_pic, microvm_machine_set_pic, diff --git a/hw/i386/multiboot.c b/hw/i386/multiboot.c index 9e7d69d470..0a10089f14 100644 --- a/hw/i386/multiboot.c +++ b/hw/i386/multiboot.c @@ -143,7 +143,8 @@ static void mb_add_mod(MultibootState *s, s->mb_mods_count++; } -int load_multiboot(FWCfgState *fw_cfg, +int 
load_multiboot(X86MachineState *x86ms, + FWCfgState *fw_cfg, FILE *f, const char *kernel_filename, const char *initrd_filename, @@ -151,6 +152,7 @@ int load_multiboot(FWCfgState *fw_cfg, int kernel_file_size, uint8_t *header) { + bool multiboot_dma_enabled = X86_MACHINE_GET_CLASS(x86ms)->fwcfg_dma_enabled; int i, is_multiboot = 0; uint32_t flags = 0; uint32_t mh_entry_addr; @@ -401,7 +403,11 @@ int load_multiboot(FWCfgState *fw_cfg, fw_cfg_add_bytes(fw_cfg, FW_CFG_INITRD_DATA, mb_bootinfo_data, sizeof(bootinfo)); - option_rom[nb_option_roms].name = "multiboot.bin"; + if (multiboot_dma_enabled) { + option_rom[nb_option_roms].name = "multiboot_dma.bin"; + } else { + option_rom[nb_option_roms].name = "multiboot.bin"; + } option_rom[nb_option_roms].bootindex = 0; nb_option_roms++; diff --git a/hw/i386/multiboot.h b/hw/i386/multiboot.h index 60de309cd1..2b9182a8ea 100644 --- a/hw/i386/multiboot.h +++ b/hw/i386/multiboot.h @@ -2,8 +2,10 @@ #define QEMU_MULTIBOOT_H #include "hw/nvram/fw_cfg.h" +#include "hw/i386/x86.h" -int load_multiboot(FWCfgState *fw_cfg, +int load_multiboot(X86MachineState *x86ms, + FWCfgState *fw_cfg, FILE *f, const char *kernel_filename, const char *initrd_filename, diff --git a/hw/i386/pc.c b/hw/i386/pc.c index 557d49c9f8..2592a82148 100644 --- a/hw/i386/pc.c +++ b/hw/i386/pc.c @@ -83,6 +83,7 @@ #include "hw/i386/intel_iommu.h" #include "hw/net/ne2000-isa.h" #include "standard-headers/asm-x86/bootparam.h" +#include "hw/virtio/virtio-iommu.h" #include "hw/virtio/virtio-pmem-pci.h" #include "hw/virtio/virtio-mem-pci.h" #include "hw/mem/memory-device.h" @@ -93,7 +94,11 @@ #include "trace.h" #include CONFIG_DEVICES -GlobalProperty pc_compat_6_1[] = {}; +GlobalProperty pc_compat_6_1[] = { + { TYPE_X86_CPU, "hv-version-id-build", "0x1bbc" }, + { TYPE_X86_CPU, "hv-version-id-major", "0x0006" }, + { TYPE_X86_CPU, "hv-version-id-minor", "0x0001" }, +}; const size_t pc_compat_6_1_len = G_N_ELEMENTS(pc_compat_6_1); GlobalProperty pc_compat_6_0[] = { @@ -710,67 +715,6 @@ void pc_acpi_smi_interrupt(void *opaque, int irq, int level) } } -/* - * This function is very similar to smp_parse() - * in hw/core/machine.c but includes CPU die support. - */ -static void pc_smp_parse(MachineState *ms, SMPConfiguration *config, Error **errp) -{ - unsigned cpus = config->has_cpus ? config->cpus : 0; - unsigned sockets = config->has_sockets ? config->sockets : 0; - unsigned dies = config->has_dies ? config->dies : 1; - unsigned cores = config->has_cores ? config->cores : 0; - unsigned threads = config->has_threads ? config->threads : 0; - - /* compute missing values, prefer sockets over cores over threads */ - if (cpus == 0 || sockets == 0) { - cores = cores > 0 ? cores : 1; - threads = threads > 0 ? threads : 1; - if (cpus == 0) { - sockets = sockets > 0 ? sockets : 1; - cpus = cores * threads * dies * sockets; - } else { - ms->smp.max_cpus = config->has_maxcpus ? config->maxcpus : cpus; - sockets = ms->smp.max_cpus / (cores * threads * dies); - } - } else if (cores == 0) { - threads = threads > 0 ? threads : 1; - cores = cpus / (sockets * dies * threads); - cores = cores > 0 ? cores : 1; - } else if (threads == 0) { - threads = cpus / (cores * dies * sockets); - threads = threads > 0 ? threads : 1; - } else if (sockets * dies * cores * threads < cpus) { - error_setg(errp, "cpu topology: " - "sockets (%u) * dies (%u) * cores (%u) * threads (%u) < " - "smp_cpus (%u)", - sockets, dies, cores, threads, cpus); - return; - } - - ms->smp.max_cpus = config->has_maxcpus ? 
config->maxcpus : cpus; - - if (ms->smp.max_cpus < cpus) { - error_setg(errp, "maxcpus must be equal to or greater than smp"); - return; - } - - if (sockets * dies * cores * threads != ms->smp.max_cpus) { - error_setg(errp, "Invalid CPU topology deprecated: " - "sockets (%u) * dies (%u) * cores (%u) * threads (%u) " - "!= maxcpus (%u)", - sockets, dies, cores, threads, - ms->smp.max_cpus); - return; - } - - ms->smp.cpus = cpus; - ms->smp.cores = cores; - ms->smp.threads = threads; - ms->smp.sockets = sockets; - ms->smp.dies = dies; -} - static void pc_machine_done(Notifier *notifier, void *data) { @@ -832,12 +776,13 @@ void xen_load_linux(PCMachineState *pcms) rom_set_fw(fw_cfg); x86_load_linux(x86ms, fw_cfg, pcmc->acpi_data_size, - pcmc->pvh_enabled, pcmc->linuxboot_dma_enabled); + pcmc->pvh_enabled); for (i = 0; i < nb_option_roms; i++) { assert(!strcmp(option_rom[i].name, "linuxboot.bin") || !strcmp(option_rom[i].name, "linuxboot_dma.bin") || !strcmp(option_rom[i].name, "pvh.bin") || - !strcmp(option_rom[i].name, "multiboot.bin")); + !strcmp(option_rom[i].name, "multiboot.bin") || + !strcmp(option_rom[i].name, "multiboot_dma.bin")); rom_add_option(option_rom[i].name, option_rom[i].bootindex); } x86ms->fw_cfg = fw_cfg; @@ -889,6 +834,10 @@ void pc_memory_init(PCMachineState *pcms, e820_add_entry(0x100000000ULL, x86ms->above_4g_mem_size, E820_RAM); } + if (pcms->sgx_epc.size != 0) { + e820_add_entry(pcms->sgx_epc.base, pcms->sgx_epc.size, E820_RESERVED); + } + if (!pcmc->has_reserved_memory && (machine->ram_slots || (machine->maxram_size > machine->ram_size))) { @@ -919,8 +868,15 @@ void pc_memory_init(PCMachineState *pcms, exit(EXIT_FAILURE); } + if (pcms->sgx_epc.size != 0) { + machine->device_memory->base = sgx_epc_above_4g_end(&pcms->sgx_epc); + } else { + machine->device_memory->base = + 0x100000000ULL + x86ms->above_4g_mem_size; + } + machine->device_memory->base = - ROUND_UP(0x100000000ULL + x86ms->above_4g_mem_size, 1 * GiB); + ROUND_UP(machine->device_memory->base, 1 * GiB); if (pcmc->enforce_aligned_dimm) { /* size device region assuming 1G page max alignment per slot */ @@ -973,7 +929,7 @@ void pc_memory_init(PCMachineState *pcms, if (linux_boot) { x86_load_linux(x86ms, fw_cfg, pcmc->acpi_data_size, - pcmc->pvh_enabled, pcmc->linuxboot_dma_enabled); + pcmc->pvh_enabled); } for (i = 0; i < nb_option_roms; i++) { @@ -1005,6 +961,8 @@ uint64_t pc_pci_hole64_start(void) if (!pcmc->broken_reserved_end) { hole64_start += memory_region_size(&ms->device_memory->mr); } + } else if (pcms->sgx_epc.size != 0) { + hole64_start = sgx_epc_above_4g_end(&pcms->sgx_epc); } else { hole64_start = 0x100000000ULL + x86ms->above_4g_mem_size; } @@ -1374,6 +1332,27 @@ static void pc_machine_device_pre_plug_cb(HotplugHandler *hotplug_dev, } else if (object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_PMEM_PCI) || object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_MEM_PCI)) { pc_virtio_md_pci_pre_plug(hotplug_dev, dev, errp); + } else if (object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_IOMMU_PCI)) { + /* Declare the APIC range as the reserved MSI region */ + char *resv_prop_str = g_strdup_printf("0xfee00000:0xfeefffff:%d", + VIRTIO_IOMMU_RESV_MEM_T_MSI); + + object_property_set_uint(OBJECT(dev), "len-reserved-regions", 1, errp); + object_property_set_str(OBJECT(dev), "reserved-regions[0]", + resv_prop_str, errp); + g_free(resv_prop_str); + } + + if (object_dynamic_cast(OBJECT(dev), TYPE_X86_IOMMU_DEVICE) || + object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_IOMMU_PCI)) { + PCMachineState *pcms = PC_MACHINE(hotplug_dev); + + 
if (pcms->iommu) { + error_setg(errp, "QEMU does not support multiple vIOMMUs " + "for x86 yet."); + return; + } + pcms->iommu = dev; } } @@ -1428,7 +1407,9 @@ static HotplugHandler *pc_get_hotplug_handler(MachineState *machine, if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM) || object_dynamic_cast(OBJECT(dev), TYPE_CPU) || object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_PMEM_PCI) || - object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_MEM_PCI)) { + object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_MEM_PCI) || + object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_IOMMU_PCI) || + object_dynamic_cast(OBJECT(dev), TYPE_X86_IOMMU_DEVICE)) { return HOTPLUG_HANDLER(machine); } @@ -1708,7 +1689,6 @@ static void pc_machine_class_init(ObjectClass *oc, void *data) /* BIOS ACPI tables: 128K. Other BIOS datastructures: less than 4K reported * to be used at the moment, 32K should be enough for a while. */ pcmc->acpi_data_size = 0x20000 + 0x8000; - pcmc->linuxboot_dma_enabled = true; pcmc->pvh_enabled = true; pcmc->kvmclock_create_always = true; assert(!mc->get_hotplug_handler); @@ -1721,7 +1701,6 @@ static void pc_machine_class_init(ObjectClass *oc, void *data) mc->auto_enable_numa_with_memdev = true; mc->has_hotpluggable_cpus = true; mc->default_boot_order = "cad"; - mc->smp_parse = pc_smp_parse; mc->block_default_type = IF_IDE; mc->max_cpus = 255; mc->reset = pc_machine_reset; @@ -1732,6 +1711,7 @@ static void pc_machine_class_init(ObjectClass *oc, void *data) hc->unplug = pc_machine_device_unplug_cb; mc->default_cpu_type = TARGET_DEFAULT_CPU_TYPE; mc->nvdimm_supported = true; + mc->smp_props.dies_supported = true; mc->default_ram_id = "pc.ram"; object_class_property_add(oc, PC_MACHINE_MAX_RAM_BELOW_4G, "size", @@ -1762,7 +1742,7 @@ static void pc_machine_class_init(ObjectClass *oc, void *data) object_class_property_add_bool(oc, "hpet", pc_machine_get_hpet, pc_machine_set_hpet); - object_class_property_add_bool(oc, "default_bus_bypass_iommu", + object_class_property_add_bool(oc, "default-bus-bypass-iommu", pc_machine_get_default_bus_bypass_iommu, pc_machine_set_default_bus_bypass_iommu); diff --git a/hw/i386/pc_piix.c b/hw/i386/pc_piix.c index c5da7739ce..223dd3e05d 100644 --- a/hw/i386/pc_piix.c +++ b/hw/i386/pc_piix.c @@ -153,6 +153,7 @@ static void pc_init1(MachineState *machine, } } + pc_machine_init_sgx_epc(pcms); x86_cpus_init(x86ms, pcmc->default_cpu_version); if (pcmc->kvmclock_enabled) { @@ -431,6 +432,7 @@ static void pc_i440fx_6_1_machine_options(MachineClass *m) m->is_default = false; compat_props_add(m->compat_props, hw_compat_6_1, hw_compat_6_1_len); compat_props_add(m->compat_props, pc_compat_6_1, pc_compat_6_1_len); + m->smp_props.prefer_sockets = true; } DEFINE_I440FX_MACHINE(v6_1, "pc-i440fx-6.1", NULL, @@ -618,11 +620,12 @@ DEFINE_I440FX_MACHINE(v2_7, "pc-i440fx-2.7", NULL, static void pc_i440fx_2_6_machine_options(MachineClass *m) { + X86MachineClass *x86mc = X86_MACHINE_CLASS(m); PCMachineClass *pcmc = PC_MACHINE_CLASS(m); pc_i440fx_2_7_machine_options(m); pcmc->legacy_cpu_hotplug = true; - pcmc->linuxboot_dma_enabled = false; + x86mc->fwcfg_dma_enabled = false; compat_props_add(m->compat_props, hw_compat_2_6, hw_compat_2_6_len); compat_props_add(m->compat_props, pc_compat_2_6, pc_compat_2_6_len); } diff --git a/hw/i386/pc_q35.c b/hw/i386/pc_q35.c index 46cd542d17..797e09500b 100644 --- a/hw/i386/pc_q35.c +++ b/hw/i386/pc_q35.c @@ -177,6 +177,7 @@ static void pc_q35_init(MachineState *machine) x86ms->below_4g_mem_size = machine->ram_size; } + pc_machine_init_sgx_epc(pcms); x86_cpus_init(x86ms, 
pcmc->default_cpu_version); kvmclock_create(pcmc->kvmclock_create_always); @@ -371,6 +372,7 @@ static void pc_q35_6_1_machine_options(MachineClass *m) m->alias = NULL; compat_props_add(m->compat_props, hw_compat_6_1, hw_compat_6_1_len); compat_props_add(m->compat_props, pc_compat_6_1, pc_compat_6_1_len); + m->smp_props.prefer_sockets = true; } DEFINE_Q35_MACHINE(v6_1, "pc-q35-6.1", NULL, @@ -571,11 +573,12 @@ DEFINE_Q35_MACHINE(v2_7, "pc-q35-2.7", NULL, static void pc_q35_2_6_machine_options(MachineClass *m) { + X86MachineClass *x86mc = X86_MACHINE_CLASS(m); PCMachineClass *pcmc = PC_MACHINE_CLASS(m); pc_q35_2_7_machine_options(m); pcmc->legacy_cpu_hotplug = true; - pcmc->linuxboot_dma_enabled = false; + x86mc->fwcfg_dma_enabled = false; compat_props_add(m->compat_props, hw_compat_2_6, hw_compat_2_6_len); compat_props_add(m->compat_props, pc_compat_2_6, pc_compat_2_6_len); } diff --git a/hw/i386/pc_sysfw.c b/hw/i386/pc_sysfw.c index 68d6b1f783..c8b17af953 100644 --- a/hw/i386/pc_sysfw.c +++ b/hw/i386/pc_sysfw.c @@ -37,7 +37,7 @@ #include "hw/qdev-properties.h" #include "hw/block/flash.h" #include "sysemu/kvm.h" -#include "sysemu/sev.h" +#include "sev.h" #define FLASH_SECTOR_SIZE 4096 diff --git a/hw/i386/sgx-epc.c b/hw/i386/sgx-epc.c new file mode 100644 index 0000000000..e508827e78 --- /dev/null +++ b/hw/i386/sgx-epc.c @@ -0,0 +1,185 @@ +/* + * SGX EPC device + * + * Copyright (C) 2019 Intel Corporation + * + * Authors: + * Sean Christopherson + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ +#include "qemu/osdep.h" +#include "hw/i386/pc.h" +#include "hw/i386/sgx-epc.h" +#include "hw/mem/memory-device.h" +#include "hw/qdev-properties.h" +#include "qapi/error.h" +#include "qapi/visitor.h" +#include "target/i386/cpu.h" +#include "exec/address-spaces.h" + +static Property sgx_epc_properties[] = { + DEFINE_PROP_UINT64(SGX_EPC_ADDR_PROP, SGXEPCDevice, addr, 0), + DEFINE_PROP_LINK(SGX_EPC_MEMDEV_PROP, SGXEPCDevice, hostmem, + TYPE_MEMORY_BACKEND_EPC, HostMemoryBackendEpc *), + DEFINE_PROP_END_OF_LIST(), +}; + +static void sgx_epc_get_size(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + Error *local_err = NULL; + uint64_t value; + + value = memory_device_get_region_size(MEMORY_DEVICE(obj), &local_err); + if (local_err) { + error_propagate(errp, local_err); + return; + } + + visit_type_uint64(v, name, &value, errp); +} + +static void sgx_epc_init(Object *obj) +{ + object_property_add(obj, SGX_EPC_SIZE_PROP, "uint64", sgx_epc_get_size, + NULL, NULL, NULL); +} + +static void sgx_epc_realize(DeviceState *dev, Error **errp) +{ + PCMachineState *pcms = PC_MACHINE(qdev_get_machine()); + X86MachineState *x86ms = X86_MACHINE(pcms); + MemoryDeviceState *md = MEMORY_DEVICE(dev); + SGXEPCState *sgx_epc = &pcms->sgx_epc; + SGXEPCDevice *epc = SGX_EPC(dev); + HostMemoryBackend *hostmem; + const char *path; + + if (x86ms->boot_cpus != 0) { + error_setg(errp, "'" TYPE_SGX_EPC "' can't be created after vCPUs," + "e.g. 
via -device"); + return; + } + + if (!epc->hostmem) { + error_setg(errp, "'" SGX_EPC_MEMDEV_PROP "' property is not set"); + return; + } + hostmem = MEMORY_BACKEND(epc->hostmem); + if (host_memory_backend_is_mapped(hostmem)) { + path = object_get_canonical_path_component(OBJECT(hostmem)); + error_setg(errp, "can't use already busy memdev: %s", path); + return; + } + + epc->addr = sgx_epc->base + sgx_epc->size; + + memory_region_add_subregion(&sgx_epc->mr, epc->addr - sgx_epc->base, + host_memory_backend_get_memory(hostmem)); + + host_memory_backend_set_mapped(hostmem, true); + + sgx_epc->sections = g_renew(SGXEPCDevice *, sgx_epc->sections, + sgx_epc->nr_sections + 1); + sgx_epc->sections[sgx_epc->nr_sections++] = epc; + + sgx_epc->size += memory_device_get_region_size(md, errp); +} + +static void sgx_epc_unrealize(DeviceState *dev) +{ + SGXEPCDevice *epc = SGX_EPC(dev); + HostMemoryBackend *hostmem = MEMORY_BACKEND(epc->hostmem); + + host_memory_backend_set_mapped(hostmem, false); +} + +static uint64_t sgx_epc_md_get_addr(const MemoryDeviceState *md) +{ + const SGXEPCDevice *epc = SGX_EPC(md); + + return epc->addr; +} + +static void sgx_epc_md_set_addr(MemoryDeviceState *md, uint64_t addr, + Error **errp) +{ + object_property_set_uint(OBJECT(md), SGX_EPC_ADDR_PROP, addr, errp); +} + +static uint64_t sgx_epc_md_get_plugged_size(const MemoryDeviceState *md, + Error **errp) +{ + return 0; +} + +static MemoryRegion *sgx_epc_md_get_memory_region(MemoryDeviceState *md, + Error **errp) +{ + SGXEPCDevice *epc = SGX_EPC(md); + HostMemoryBackend *hostmem; + + if (!epc->hostmem) { + error_setg(errp, "'" SGX_EPC_MEMDEV_PROP "' property must be set"); + return NULL; + } + + hostmem = MEMORY_BACKEND(epc->hostmem); + return host_memory_backend_get_memory(hostmem); +} + +static void sgx_epc_md_fill_device_info(const MemoryDeviceState *md, + MemoryDeviceInfo *info) +{ + SgxEPCDeviceInfo *se = g_new0(SgxEPCDeviceInfo, 1); + SGXEPCDevice *epc = SGX_EPC(md); + + se->memaddr = epc->addr; + se->size = object_property_get_uint(OBJECT(epc), SGX_EPC_SIZE_PROP, + NULL); + se->memdev = object_get_canonical_path(OBJECT(epc->hostmem)); + + info->u.sgx_epc.data = se; + info->type = MEMORY_DEVICE_INFO_KIND_SGX_EPC; +} + +static void sgx_epc_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + MemoryDeviceClass *mdc = MEMORY_DEVICE_CLASS(oc); + + dc->hotpluggable = false; + dc->realize = sgx_epc_realize; + dc->unrealize = sgx_epc_unrealize; + dc->desc = "SGX EPC section"; + dc->user_creatable = false; + device_class_set_props(dc, sgx_epc_properties); + + mdc->get_addr = sgx_epc_md_get_addr; + mdc->set_addr = sgx_epc_md_set_addr; + mdc->get_plugged_size = sgx_epc_md_get_plugged_size; + mdc->get_memory_region = sgx_epc_md_get_memory_region; + mdc->fill_device_info = sgx_epc_md_fill_device_info; +} + +static TypeInfo sgx_epc_info = { + .name = TYPE_SGX_EPC, + .parent = TYPE_DEVICE, + .instance_size = sizeof(SGXEPCDevice), + .instance_init = sgx_epc_init, + .class_init = sgx_epc_class_init, + .class_size = sizeof(DeviceClass), + .interfaces = (InterfaceInfo[]) { + { TYPE_MEMORY_DEVICE }, + { } + }, +}; + +static void sgx_epc_register_types(void) +{ + type_register_static(&sgx_epc_info); +} + +type_init(sgx_epc_register_types) diff --git a/hw/i386/sgx-stub.c b/hw/i386/sgx-stub.c new file mode 100644 index 0000000000..c9b379e665 --- /dev/null +++ b/hw/i386/sgx-stub.c @@ -0,0 +1,34 @@ +#include "qemu/osdep.h" +#include "monitor/monitor.h" +#include "monitor/hmp-target.h" +#include "hw/i386/pc.h" 
+#include "hw/i386/sgx-epc.h" +#include "qapi/error.h" +#include "qapi/qapi-commands-misc-target.h" + +SGXInfo *qmp_query_sgx(Error **errp) +{ + error_setg(errp, "SGX support is not compiled in"); + return NULL; +} + +SGXInfo *qmp_query_sgx_capabilities(Error **errp) +{ + error_setg(errp, "SGX support is not compiled in"); + return NULL; +} + +void hmp_info_sgx(Monitor *mon, const QDict *qdict) +{ + monitor_printf(mon, "SGX is not available in this QEMU\n"); +} + +void pc_machine_init_sgx_epc(PCMachineState *pcms) +{ + memset(&pcms->sgx_epc, 0, sizeof(SGXEPCState)); +} + +bool sgx_epc_get_section(int section_nr, uint64_t *addr, uint64_t *size) +{ + g_assert_not_reached(); +} diff --git a/hw/i386/sgx.c b/hw/i386/sgx.c new file mode 100644 index 0000000000..8fef3dd8fa --- /dev/null +++ b/hw/i386/sgx.c @@ -0,0 +1,243 @@ +/* + * SGX common code + * + * Copyright (C) 2021 Intel Corporation + * + * Authors: + * Yang Zhong + * Sean Christopherson + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ +#include "qemu/osdep.h" +#include "hw/i386/pc.h" +#include "hw/i386/sgx-epc.h" +#include "hw/mem/memory-device.h" +#include "monitor/qdev.h" +#include "monitor/monitor.h" +#include "monitor/hmp-target.h" +#include "qapi/error.h" +#include "qapi/qapi-commands-misc-target.h" +#include "exec/address-spaces.h" +#include "sysemu/hw_accel.h" +#include "sysemu/reset.h" +#include + +#define SGX_MAX_EPC_SECTIONS 8 +#define SGX_CPUID_EPC_INVALID 0x0 + +/* A valid EPC section. */ +#define SGX_CPUID_EPC_SECTION 0x1 +#define SGX_CPUID_EPC_MASK 0xF + +#define SGX_MAGIC 0xA4 +#define SGX_IOC_VEPC_REMOVE_ALL _IO(SGX_MAGIC, 0x04) + +#define RETRY_NUM 2 + +static uint64_t sgx_calc_section_metric(uint64_t low, uint64_t high) +{ + return (low & MAKE_64BIT_MASK(12, 20)) + + ((high & MAKE_64BIT_MASK(0, 20)) << 32); +} + +static uint64_t sgx_calc_host_epc_section_size(void) +{ + uint32_t i, type; + uint32_t eax, ebx, ecx, edx; + uint64_t size = 0; + + for (i = 0; i < SGX_MAX_EPC_SECTIONS; i++) { + host_cpuid(0x12, i + 2, &eax, &ebx, &ecx, &edx); + + type = eax & SGX_CPUID_EPC_MASK; + if (type == SGX_CPUID_EPC_INVALID) { + break; + } + + if (type != SGX_CPUID_EPC_SECTION) { + break; + } + + size += sgx_calc_section_metric(ecx, edx); + } + + return size; +} + +static void sgx_epc_reset(void *opaque) +{ + PCMachineState *pcms = PC_MACHINE(qdev_get_machine()); + HostMemoryBackend *hostmem; + SGXEPCDevice *epc; + int failures; + int fd, i, j, r; + static bool warned = false; + + /* + * The second pass is needed to remove SECS pages that could not + * be removed during the first. 
+ */ + for (i = 0; i < RETRY_NUM; i++) { + failures = 0; + for (j = 0; j < pcms->sgx_epc.nr_sections; j++) { + epc = pcms->sgx_epc.sections[j]; + hostmem = MEMORY_BACKEND(epc->hostmem); + fd = memory_region_get_fd(host_memory_backend_get_memory(hostmem)); + + r = ioctl(fd, SGX_IOC_VEPC_REMOVE_ALL); + if (r == -ENOTTY && !warned) { + warned = true; + warn_report("kernel does not support SGX_IOC_VEPC_REMOVE_ALL"); + warn_report("SGX might operate incorrectly in the guest after reset"); + break; + } else if (r > 0) { + /* SECS pages remain */ + failures++; + if (i == 1) { + error_report("cannot reset vEPC section %d", j); + } + } + } + if (!failures) { + break; + } + } +} + +SGXInfo *qmp_query_sgx_capabilities(Error **errp) +{ + SGXInfo *info = NULL; + uint32_t eax, ebx, ecx, edx; + + int fd = qemu_open_old("/dev/sgx_vepc", O_RDWR); + if (fd < 0) { + error_setg(errp, "SGX is not enabled in KVM"); + return NULL; + } + + info = g_new0(SGXInfo, 1); + host_cpuid(0x7, 0, &eax, &ebx, &ecx, &edx); + + info->sgx = ebx & (1U << 2) ? true : false; + info->flc = ecx & (1U << 30) ? true : false; + + host_cpuid(0x12, 0, &eax, &ebx, &ecx, &edx); + info->sgx1 = eax & (1U << 0) ? true : false; + info->sgx2 = eax & (1U << 1) ? true : false; + + info->section_size = sgx_calc_host_epc_section_size(); + + close(fd); + + return info; +} + +SGXInfo *qmp_query_sgx(Error **errp) +{ + SGXInfo *info = NULL; + X86MachineState *x86ms; + PCMachineState *pcms = + (PCMachineState *)object_dynamic_cast(qdev_get_machine(), + TYPE_PC_MACHINE); + if (!pcms) { + error_setg(errp, "SGX is only supported on PC machines"); + return NULL; + } + + x86ms = X86_MACHINE(pcms); + if (!x86ms->sgx_epc_list) { + error_setg(errp, "No EPC regions defined, SGX not available"); + return NULL; + } + + SGXEPCState *sgx_epc = &pcms->sgx_epc; + info = g_new0(SGXInfo, 1); + + info->sgx = true; + info->sgx1 = true; + info->sgx2 = true; + info->flc = true; + info->section_size = sgx_epc->size; + + return info; +} + +void hmp_info_sgx(Monitor *mon, const QDict *qdict) +{ + Error *err = NULL; + g_autoptr(SGXInfo) info = qmp_query_sgx(&err); + + if (err) { + error_report_err(err); + return; + } + monitor_printf(mon, "SGX support: %s\n", + info->sgx ? "enabled" : "disabled"); + monitor_printf(mon, "SGX1 support: %s\n", + info->sgx1 ? "enabled" : "disabled"); + monitor_printf(mon, "SGX2 support: %s\n", + info->sgx2 ? "enabled" : "disabled"); + monitor_printf(mon, "FLC support: %s\n", + info->flc ? 
"enabled" : "disabled"); + monitor_printf(mon, "size: %" PRIu64 "\n", + info->section_size); +} + +bool sgx_epc_get_section(int section_nr, uint64_t *addr, uint64_t *size) +{ + PCMachineState *pcms = PC_MACHINE(qdev_get_machine()); + SGXEPCDevice *epc; + + if (pcms->sgx_epc.size == 0 || pcms->sgx_epc.nr_sections <= section_nr) { + return true; + } + + epc = pcms->sgx_epc.sections[section_nr]; + + *addr = epc->addr; + *size = memory_device_get_region_size(MEMORY_DEVICE(epc), &error_fatal); + + return false; +} + +void pc_machine_init_sgx_epc(PCMachineState *pcms) +{ + SGXEPCState *sgx_epc = &pcms->sgx_epc; + X86MachineState *x86ms = X86_MACHINE(pcms); + SgxEPCList *list = NULL; + Object *obj; + + memset(sgx_epc, 0, sizeof(SGXEPCState)); + if (!x86ms->sgx_epc_list) { + return; + } + + sgx_epc->base = 0x100000000ULL + x86ms->above_4g_mem_size; + + memory_region_init(&sgx_epc->mr, OBJECT(pcms), "sgx-epc", UINT64_MAX); + memory_region_add_subregion(get_system_memory(), sgx_epc->base, + &sgx_epc->mr); + + for (list = x86ms->sgx_epc_list; list; list = list->next) { + obj = object_new("sgx-epc"); + + /* set the memdev link with memory backend */ + object_property_parse(obj, SGX_EPC_MEMDEV_PROP, list->value->memdev, + &error_fatal); + object_property_set_bool(obj, "realized", true, &error_fatal); + object_unref(obj); + } + + if ((sgx_epc->base + sgx_epc->size) < sgx_epc->base) { + error_report("Size of all 'sgx-epc' =0x%"PRIu64" causes EPC to wrap", + sgx_epc->size); + exit(EXIT_FAILURE); + } + + memory_region_set_size(&sgx_epc->mr, sgx_epc->size); + + /* register the reset callback for sgx epc */ + qemu_register_reset(sgx_epc_reset, NULL); +} diff --git a/hw/i386/vmmouse.c b/hw/i386/vmmouse.c index df4798f502..3d66368286 100644 --- a/hw/i386/vmmouse.c +++ b/hw/i386/vmmouse.c @@ -158,6 +158,7 @@ static void vmmouse_read_id(VMMouseState *s) s->queue[s->nb_queue++] = VMMOUSE_VERSION; s->status = 0; + vmmouse_update_handler(s, s->absolute); } static void vmmouse_request_relative(VMMouseState *s) diff --git a/hw/i386/x86-iommu-stub.c b/hw/i386/x86-iommu-stub.c index c5ba077f9d..781b5ff922 100644 --- a/hw/i386/x86-iommu-stub.c +++ b/hw/i386/x86-iommu-stub.c @@ -36,8 +36,3 @@ bool x86_iommu_ir_supported(X86IOMMUState *s) { return false; } - -IommuType x86_iommu_get_type(void) -{ - abort(); -} diff --git a/hw/i386/x86-iommu.c b/hw/i386/x86-iommu.c index 86ad03972e..01d11325a6 100644 --- a/hw/i386/x86-iommu.c +++ b/hw/i386/x86-iommu.c @@ -77,30 +77,17 @@ void x86_iommu_irq_to_msi_message(X86IOMMUIrq *irq, MSIMessage *msg_out) msg_out->data = msg.msi_data; } -/* Default X86 IOMMU device */ -static X86IOMMUState *x86_iommu_default = NULL; - -static void x86_iommu_set_default(X86IOMMUState *x86_iommu) -{ - assert(x86_iommu); - - if (x86_iommu_default) { - error_report("QEMU does not support multiple vIOMMUs " - "for x86 yet."); - exit(1); - } - - x86_iommu_default = x86_iommu; -} - X86IOMMUState *x86_iommu_get_default(void) { - return x86_iommu_default; -} + MachineState *ms = MACHINE(qdev_get_machine()); + PCMachineState *pcms = + PC_MACHINE(object_dynamic_cast(OBJECT(ms), TYPE_PC_MACHINE)); -IommuType x86_iommu_get_type(void) -{ - return x86_iommu_default->type; + if (pcms && + object_dynamic_cast(OBJECT(pcms->iommu), TYPE_X86_IOMMU_DEVICE)) { + return X86_IOMMU_DEVICE(pcms->iommu); + } + return NULL; } static void x86_iommu_realize(DeviceState *dev, Error **errp) @@ -136,8 +123,6 @@ static void x86_iommu_realize(DeviceState *dev, Error **errp) if (x86_class->realize) { x86_class->realize(dev, errp); } - - 
x86_iommu_set_default(X86_IOMMU_DEVICE(dev)); } static Property x86_iommu_properties[] = { diff --git a/hw/i386/x86.c b/hw/i386/x86.c index 00448ed55a..b84840a1bb 100644 --- a/hw/i386/x86.c +++ b/hw/i386/x86.c @@ -30,6 +30,8 @@ #include "qapi/error.h" #include "qapi/qmp/qerror.h" #include "qapi/qapi-visit-common.h" +#include "qapi/clone-visitor.h" +#include "qapi/qapi-visit-machine.h" #include "qapi/visitor.h" #include "sysemu/qtest.h" #include "sysemu/whpx.h" @@ -45,6 +47,7 @@ #include "hw/i386/fw_cfg.h" #include "hw/intc/i8259.h" #include "hw/rtc/mc146818rtc.h" +#include "target/i386/sev.h" #include "hw/acpi/cpu_hotplug.h" #include "hw/irq.h" @@ -761,9 +764,9 @@ static bool load_elfboot(const char *kernel_filename, void x86_load_linux(X86MachineState *x86ms, FWCfgState *fw_cfg, int acpi_data_size, - bool pvh_enabled, - bool linuxboot_dma_enabled) + bool pvh_enabled) { + bool linuxboot_dma_enabled = X86_MACHINE_GET_CLASS(x86ms)->fwcfg_dma_enabled; uint16_t protocol; int setup_size, kernel_size, cmdline_size; int dtb_size, setup_data_offset; @@ -778,6 +781,7 @@ void x86_load_linux(X86MachineState *x86ms, const char *initrd_filename = machine->initrd_filename; const char *dtb_filename = machine->dtb; const char *kernel_cmdline = machine->kernel_cmdline; + SevKernelLoaderContext sev_load_ctx = {}; /* Align to 16 bytes as a paranoia measure */ cmdline_size = (strlen(kernel_cmdline) + 16) & ~15; @@ -810,7 +814,7 @@ void x86_load_linux(X86MachineState *x86ms, * PVH), so we try multiboot first since we check the multiboot magic * header before to load it. */ - if (load_multiboot(fw_cfg, f, kernel_filename, initrd_filename, + if (load_multiboot(x86ms, fw_cfg, f, kernel_filename, initrd_filename, kernel_cmdline, kernel_size, header)) { return; } @@ -924,6 +928,8 @@ void x86_load_linux(X86MachineState *x86ms, fw_cfg_add_i32(fw_cfg, FW_CFG_CMDLINE_ADDR, cmdline_addr); fw_cfg_add_i32(fw_cfg, FW_CFG_CMDLINE_SIZE, strlen(kernel_cmdline) + 1); fw_cfg_add_string(fw_cfg, FW_CFG_CMDLINE_DATA, kernel_cmdline); + sev_load_ctx.cmdline_data = (char *)kernel_cmdline; + sev_load_ctx.cmdline_size = strlen(kernel_cmdline) + 1; if (protocol >= 0x202) { stl_p(header + 0x228, cmdline_addr); @@ -1005,6 +1011,8 @@ void x86_load_linux(X86MachineState *x86ms, fw_cfg_add_i32(fw_cfg, FW_CFG_INITRD_ADDR, initrd_addr); fw_cfg_add_i32(fw_cfg, FW_CFG_INITRD_SIZE, initrd_size); fw_cfg_add_bytes(fw_cfg, FW_CFG_INITRD_DATA, initrd_data, initrd_size); + sev_load_ctx.initrd_data = initrd_data; + sev_load_ctx.initrd_size = initrd_size; stl_p(header + 0x218, initrd_addr); stl_p(header + 0x21c, initrd_size); @@ -1063,15 +1071,32 @@ void x86_load_linux(X86MachineState *x86ms, load_image_size(dtb_filename, setup_data->data, dtb_size); } - memcpy(setup, header, MIN(sizeof(header), setup_size)); + /* + * If we're starting an encrypted VM, it will be OVMF based, which uses the + * efi stub for booting and doesn't require any values to be placed in the + * kernel header. We therefore don't update the header so the hash of the + * kernel on the other side of the fw_cfg interface matches the hash of the + * file the user passed in. 
+ */ + if (!sev_enabled()) { + memcpy(setup, header, MIN(sizeof(header), setup_size)); + } fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_ADDR, prot_addr); fw_cfg_add_i32(fw_cfg, FW_CFG_KERNEL_SIZE, kernel_size); fw_cfg_add_bytes(fw_cfg, FW_CFG_KERNEL_DATA, kernel, kernel_size); + sev_load_ctx.kernel_data = (char *)kernel; + sev_load_ctx.kernel_size = kernel_size; fw_cfg_add_i32(fw_cfg, FW_CFG_SETUP_ADDR, real_addr); fw_cfg_add_i32(fw_cfg, FW_CFG_SETUP_SIZE, setup_size); fw_cfg_add_bytes(fw_cfg, FW_CFG_SETUP_DATA, setup, setup_size); + sev_load_ctx.setup_data = (char *)setup; + sev_load_ctx.setup_size = setup_size; + + if (sev_enabled()) { + sev_add_kernel_loader_hashes(&sev_load_ctx, &error_fatal); + } option_rom[nb_option_roms].bootindex = 0; option_rom[nb_option_roms].name = "linuxboot.bin"; @@ -1263,6 +1288,27 @@ static void x86_machine_set_bus_lock_ratelimit(Object *obj, Visitor *v, visit_type_uint64(v, name, &x86ms->bus_lock_ratelimit, errp); } +static void machine_get_sgx_epc(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + X86MachineState *x86ms = X86_MACHINE(obj); + SgxEPCList *list = x86ms->sgx_epc_list; + + visit_type_SgxEPCList(v, name, &list, errp); +} + +static void machine_set_sgx_epc(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + X86MachineState *x86ms = X86_MACHINE(obj); + SgxEPCList *list; + + list = x86ms->sgx_epc_list; + visit_type_SgxEPCList(v, name, &x86ms->sgx_epc_list, errp); + + qapi_free_SgxEPCList(list); +} + static void x86_machine_initfn(Object *obj) { X86MachineState *x86ms = X86_MACHINE(obj); @@ -1286,6 +1332,7 @@ static void x86_machine_class_init(ObjectClass *oc, void *data) mc->possible_cpu_arch_ids = x86_possible_cpu_arch_ids; x86mc->compat_apic_id_mode = false; x86mc->save_tsc_khz = true; + x86mc->fwcfg_dma_enabled = true; nc->nmi_monitor_handler = x86_nmi; object_class_property_add(oc, X86_MACHINE_SMM, "OnOffAuto", @@ -1322,6 +1369,12 @@ static void x86_machine_class_init(ObjectClass *oc, void *data) x86_machine_set_bus_lock_ratelimit, NULL, NULL); object_class_property_set_description(oc, X86_MACHINE_BUS_LOCK_RATELIMIT, "Set the ratelimit for the bus locks acquired in VMs"); + + object_class_property_add(oc, "sgx-epc", "SgxEPC", + machine_get_sgx_epc, machine_set_sgx_epc, + NULL, NULL); + object_class_property_set_description(oc, "sgx-epc", + "SGX EPC device"); } static const TypeInfo x86_machine_info = { diff --git a/hw/i386/xen/xen-hvm.c b/hw/i386/xen/xen-hvm.c index 9b432773f0..482be95415 100644 --- a/hw/i386/xen/xen-hvm.c +++ b/hw/i386/xen/xen-hvm.c @@ -721,6 +721,7 @@ static void xen_log_global_stop(MemoryListener *listener) } static MemoryListener xen_memory_listener = { + .name = "xen-memory", .region_add = xen_region_add, .region_del = xen_region_del, .log_start = xen_log_start, @@ -732,6 +733,7 @@ static MemoryListener xen_memory_listener = { }; static MemoryListener xen_io_listener = { + .name = "xen-io", .region_add = xen_io_add, .region_del = xen_io_del, .priority = 10, @@ -1611,8 +1613,8 @@ void xen_hvm_modified_memory(ram_addr_t start, ram_addr_t length) void qmp_xen_set_global_dirty_log(bool enable, Error **errp) { if (enable) { - memory_global_dirty_log_start(); + memory_global_dirty_log_start(GLOBAL_DIRTY_MIGRATION); } else { - memory_global_dirty_log_stop(); + memory_global_dirty_log_stop(GLOBAL_DIRTY_MIGRATION); } } diff --git a/hw/ide/ahci.c b/hw/ide/ahci.c index f2c5157483..a94c6e26fb 100644 --- a/hw/ide/ahci.c +++ b/hw/ide/ahci.c @@ -1548,7 +1548,7 @@ void ahci_realize(AHCIState 
*s, DeviceState *qdev, AddressSpace *as, int ports) for (i = 0; i < s->ports; i++) { AHCIDevice *ad = &s->dev[i]; - ide_bus_new(&ad->port, sizeof(ad->port), qdev, i, 1); + ide_bus_init(&ad->port, sizeof(ad->port), qdev, i, 1); ide_init2(&ad->port, irqs[i]); ad->hba = s; diff --git a/hw/ide/cmd646.c b/hw/ide/cmd646.c index c254631485..94c576262c 100644 --- a/hw/ide/cmd646.c +++ b/hw/ide/cmd646.c @@ -293,7 +293,7 @@ static void pci_cmd646_ide_realize(PCIDevice *dev, Error **errp) qdev_init_gpio_in(ds, cmd646_set_irq, 2); for (i = 0; i < 2; i++) { - ide_bus_new(&d->bus[i], sizeof(d->bus[i]), ds, i, 2); + ide_bus_init(&d->bus[i], sizeof(d->bus[i]), ds, i, 2); ide_init2(&d->bus[i], qdev_get_gpio_in(ds, i)); bmdma_init(&d->bus[i], &d->bmdma[i], d); diff --git a/hw/ide/core.c b/hw/ide/core.c index fd69ca3167..e28f8aad61 100644 --- a/hw/ide/core.c +++ b/hw/ide/core.c @@ -98,8 +98,12 @@ static void put_le16(uint16_t *p, unsigned int v) static void ide_identify_size(IDEState *s) { uint16_t *p = (uint16_t *)s->identify_data; - put_le16(p + 60, s->nb_sectors); - put_le16(p + 61, s->nb_sectors >> 16); + int64_t nb_sectors_lba28 = s->nb_sectors; + if (nb_sectors_lba28 >= 1 << 28) { + nb_sectors_lba28 = (1 << 28) - 1; + } + put_le16(p + 60, nb_sectors_lba28); + put_le16(p + 61, nb_sectors_lba28 >> 16); put_le16(p + 100, s->nb_sectors); put_le16(p + 101, s->nb_sectors >> 16); put_le16(p + 102, s->nb_sectors >> 32); diff --git a/hw/ide/isa.c b/hw/ide/isa.c index 6bc19de226..24bbde24c2 100644 --- a/hw/ide/isa.c +++ b/hw/ide/isa.c @@ -73,7 +73,7 @@ static void isa_ide_realizefn(DeviceState *dev, Error **errp) ISADevice *isadev = ISA_DEVICE(dev); ISAIDEState *s = ISA_IDE(dev); - ide_bus_new(&s->bus, sizeof(s->bus), dev, 0, 2); + ide_bus_init(&s->bus, sizeof(s->bus), dev, 0, 2); ide_init_ioport(&s->bus, isadev, s->iobase, s->iobase2); isa_init_irq(isadev, &s->irq, s->isairq); ide_init2(&s->bus, s->irq); diff --git a/hw/ide/macio.c b/hw/ide/macio.c index b270a10163..b03d401ceb 100644 --- a/hw/ide/macio.c +++ b/hw/ide/macio.c @@ -449,7 +449,7 @@ static void macio_ide_initfn(Object *obj) SysBusDevice *d = SYS_BUS_DEVICE(obj); MACIOIDEState *s = MACIO_IDE(obj); - ide_bus_new(&s->bus, sizeof(s->bus), DEVICE(obj), 0, 2); + ide_bus_init(&s->bus, sizeof(s->bus), DEVICE(obj), 0, 2); memory_region_init_io(&s->mem, obj, &pmac_ide_ops, s, "pmac-ide", 0x1000); sysbus_init_mmio(d, &s->mem); sysbus_init_irq(d, &s->real_ide_irq); diff --git a/hw/ide/microdrive.c b/hw/ide/microdrive.c index 58a14fea36..6df9b4cbbe 100644 --- a/hw/ide/microdrive.c +++ b/hw/ide/microdrive.c @@ -605,7 +605,7 @@ static void microdrive_init(Object *obj) { MicroDriveState *md = MICRODRIVE(obj); - ide_bus_new(&md->bus, sizeof(md->bus), DEVICE(obj), 0, 1); + ide_bus_init(&md->bus, sizeof(md->bus), DEVICE(obj), 0, 1); } static void microdrive_class_init(ObjectClass *oc, void *data) diff --git a/hw/ide/mmio.c b/hw/ide/mmio.c index 36e2f4790a..fb2ebd4847 100644 --- a/hw/ide/mmio.c +++ b/hw/ide/mmio.c @@ -142,7 +142,7 @@ static void mmio_ide_initfn(Object *obj) SysBusDevice *d = SYS_BUS_DEVICE(obj); MMIOState *s = MMIO_IDE(obj); - ide_bus_new(&s->bus, sizeof(s->bus), DEVICE(obj), 0, 2); + ide_bus_init(&s->bus, sizeof(s->bus), DEVICE(obj), 0, 2); sysbus_init_irq(d, &s->irq); } diff --git a/hw/ide/piix.c b/hw/ide/piix.c index d3e738320b..ce89fd0aa3 100644 --- a/hw/ide/piix.c +++ b/hw/ide/piix.c @@ -137,7 +137,7 @@ static int pci_piix_init_ports(PCIIDEState *d) int i, ret; for (i = 0; i < 2; i++) { - ide_bus_new(&d->bus[i], sizeof(d->bus[i]), DEVICE(d), i, 
2); + ide_bus_init(&d->bus[i], sizeof(d->bus[i]), DEVICE(d), i, 2); ret = ide_init_ioport(&d->bus[i], NULL, port_info[i].iobase, port_info[i].iobase2); if (ret) { diff --git a/hw/ide/qdev.c b/hw/ide/qdev.c index e70ebc83a0..618045b85a 100644 --- a/hw/ide/qdev.c +++ b/hw/ide/qdev.c @@ -68,10 +68,10 @@ static const TypeInfo ide_bus_info = { .class_init = ide_bus_class_init, }; -void ide_bus_new(IDEBus *idebus, size_t idebus_size, DeviceState *dev, +void ide_bus_init(IDEBus *idebus, size_t idebus_size, DeviceState *dev, int bus_id, int max_units) { - qbus_create_inplace(idebus, idebus_size, TYPE_IDE_BUS, dev, NULL); + qbus_init(idebus, idebus_size, TYPE_IDE_BUS, dev, NULL); idebus->bus_id = bus_id; idebus->max_units = max_units; } diff --git a/hw/ide/sii3112.c b/hw/ide/sii3112.c index 34c347b9c2..46204f10d7 100644 --- a/hw/ide/sii3112.c +++ b/hw/ide/sii3112.c @@ -283,7 +283,7 @@ static void sii3112_pci_realize(PCIDevice *dev, Error **errp) qdev_init_gpio_in(ds, sii3112_set_irq, 2); for (i = 0; i < 2; i++) { - ide_bus_new(&s->bus[i], sizeof(s->bus[i]), ds, i, 1); + ide_bus_init(&s->bus[i], sizeof(s->bus[i]), ds, i, 1); ide_init2(&s->bus[i], qdev_get_gpio_in(ds, i)); bmdma_init(&s->bus[i], &s->bmdma[i], s); diff --git a/hw/ide/via.c b/hw/ide/via.c index be09912b33..82def819c4 100644 --- a/hw/ide/via.c +++ b/hw/ide/via.c @@ -29,7 +29,7 @@ #include "migration/vmstate.h" #include "qemu/module.h" #include "sysemu/dma.h" - +#include "hw/isa/vt82c686.h" #include "hw/ide/pci.h" #include "trace.h" @@ -112,7 +112,7 @@ static void via_ide_set_irq(void *opaque, int n, int level) d->config[0x70 + n * 8] &= ~0x80; } - qemu_set_irq(isa_get_irq(NULL, 14 + n), level); + via_isa_set_irq(pci_get_function_0(d), 14 + n, level); } static void via_ide_reset(DeviceState *dev) @@ -190,7 +190,7 @@ static void via_ide_realize(PCIDevice *dev, Error **errp) qdev_init_gpio_in(ds, via_ide_set_irq, 2); for (i = 0; i < 2; i++) { - ide_bus_new(&d->bus[i], sizeof(d->bus[i]), ds, i, 2); + ide_bus_init(&d->bus[i], sizeof(d->bus[i]), ds, i, 2); ide_init2(&d->bus[i], qdev_get_gpio_in(ds, i)); bmdma_init(&d->bus[i], &d->bmdma[i], d); @@ -217,6 +217,9 @@ static void via_ide_class_init(ObjectClass *klass, void *data) dc->reset = via_ide_reset; dc->vmsd = &vmstate_ide_pci; + /* Reason: only works as function of VIA southbridge */ + dc->user_creatable = false; + k->realize = via_ide_realize; k->exit = via_ide_exitfn; k->vendor_id = PCI_VENDOR_ID_VIA; diff --git a/hw/input/lasips2.c b/hw/input/lasips2.c index e7faf24058..68d741d342 100644 --- a/hw/input/lasips2.c +++ b/hw/input/lasips2.c @@ -96,7 +96,7 @@ typedef enum { LASIPS2_STATUS_CLKSHD = 0x80, } lasips2_status_reg_t; -static const char *artist_read_reg_name(uint64_t addr) +static const char *lasips2_read_reg_name(uint64_t addr) { switch (addr & 0xc) { case REG_PS2_ID: @@ -116,7 +116,7 @@ static const char *artist_read_reg_name(uint64_t addr) } } -static const char *artist_write_reg_name(uint64_t addr) +static const char *lasips2_write_reg_name(uint64_t addr) { switch (addr & 0x0c) { case REG_PS2_RESET: @@ -145,7 +145,7 @@ static void lasips2_reg_write(void *opaque, hwaddr addr, uint64_t val, LASIPS2Port *port = opaque; trace_lasips2_reg_write(size, port->id, addr, - artist_write_reg_name(addr), val); + lasips2_write_reg_name(addr), val); switch (addr & 0xc) { case REG_PS2_CONTROL: @@ -239,7 +239,7 @@ static uint64_t lasips2_reg_read(void *opaque, hwaddr addr, unsigned size) break; } trace_lasips2_reg_read(size, port->id, addr, - artist_read_reg_name(addr), ret); + 
lasips2_read_reg_name(addr), ret); return ret; } diff --git a/hw/intc/goldfish_pic.c b/hw/intc/goldfish_pic.c index e3b43a69f1..dfd53275f6 100644 --- a/hw/intc/goldfish_pic.c +++ b/hw/intc/goldfish_pic.c @@ -1,5 +1,5 @@ /* - * SPDX-License-Identifer: GPL-2.0-or-later + * SPDX-License-Identifier: GPL-2.0-or-later * * Goldfish PIC * diff --git a/hw/intc/ibex_plic.c b/hw/intc/ibex_plic.c deleted file mode 100644 index ff430356f8..0000000000 --- a/hw/intc/ibex_plic.c +++ /dev/null @@ -1,307 +0,0 @@ -/* - * QEMU RISC-V lowRISC Ibex PLIC - * - * Copyright (c) 2020 Western Digital - * - * Documentation avaliable: https://docs.opentitan.org/hw/ip/rv_plic/doc/ - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2 or later, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program. If not, see . - */ - -#include "qemu/osdep.h" -#include "qemu/log.h" -#include "hw/qdev-properties.h" -#include "hw/core/cpu.h" -#include "hw/boards.h" -#include "hw/pci/msi.h" -#include "target/riscv/cpu_bits.h" -#include "target/riscv/cpu.h" -#include "hw/intc/ibex_plic.h" -#include "hw/irq.h" - -static bool addr_between(uint32_t addr, uint32_t base, uint32_t num) -{ - uint32_t end = base + (num * 0x04); - - if (addr >= base && addr < end) { - return true; - } - - return false; -} - -static void ibex_plic_irqs_set_pending(IbexPlicState *s, int irq, bool level) -{ - int pending_num = irq / 32; - - if (!level) { - /* - * If the level is low make sure we clear the hidden_pending. - */ - s->hidden_pending[pending_num] &= ~(1 << (irq % 32)); - } - - if (s->claimed[pending_num] & 1 << (irq % 32)) { - /* - * The interrupt has been claimed, but not completed. - * The pending bit can't be set. - * Save the pending level for after the interrupt is completed. 
- */ - s->hidden_pending[pending_num] |= level << (irq % 32); - } else { - s->pending[pending_num] |= level << (irq % 32); - } -} - -static bool ibex_plic_irqs_pending(IbexPlicState *s, uint32_t context) -{ - int i; - uint32_t max_irq = 0; - uint32_t max_prio = s->threshold; - - for (i = 0; i < s->pending_num; i++) { - uint32_t irq_num = ctz64(s->pending[i]) + (i * 32); - - if (!(s->pending[i] & s->enable[i])) { - /* No pending and enabled IRQ */ - continue; - } - - if (s->priority[irq_num] > max_prio) { - max_irq = irq_num; - max_prio = s->priority[irq_num]; - } - } - - if (max_irq) { - s->claim = max_irq; - return true; - } - - return false; -} - -static void ibex_plic_update(IbexPlicState *s) -{ - int i; - - for (i = 0; i < s->num_cpus; i++) { - qemu_set_irq(s->external_irqs[i], ibex_plic_irqs_pending(s, 0)); - } -} - -static void ibex_plic_reset(DeviceState *dev) -{ - IbexPlicState *s = IBEX_PLIC(dev); - - s->threshold = 0x00000000; - s->claim = 0x00000000; -} - -static uint64_t ibex_plic_read(void *opaque, hwaddr addr, - unsigned int size) -{ - IbexPlicState *s = opaque; - int offset; - uint32_t ret = 0; - - if (addr_between(addr, s->pending_base, s->pending_num)) { - offset = (addr - s->pending_base) / 4; - ret = s->pending[offset]; - } else if (addr_between(addr, s->source_base, s->source_num)) { - qemu_log_mask(LOG_UNIMP, - "%s: Interrupt source mode not supported\n", __func__); - } else if (addr_between(addr, s->priority_base, s->priority_num)) { - offset = (addr - s->priority_base) / 4; - ret = s->priority[offset]; - } else if (addr_between(addr, s->enable_base, s->enable_num)) { - offset = (addr - s->enable_base) / 4; - ret = s->enable[offset]; - } else if (addr_between(addr, s->threshold_base, 1)) { - ret = s->threshold; - } else if (addr_between(addr, s->claim_base, 1)) { - int pending_num = s->claim / 32; - s->pending[pending_num] &= ~(1 << (s->claim % 32)); - - /* Set the interrupt as claimed, but not completed */ - s->claimed[pending_num] |= 1 << (s->claim % 32); - - /* Return the current claimed interrupt */ - ret = s->claim; - - /* Clear the claimed interrupt */ - s->claim = 0x00000000; - - /* Update the interrupt status after the claim */ - ibex_plic_update(s); - } - - return ret; -} - -static void ibex_plic_write(void *opaque, hwaddr addr, - uint64_t value, unsigned int size) -{ - IbexPlicState *s = opaque; - - if (addr_between(addr, s->pending_base, s->pending_num)) { - qemu_log_mask(LOG_GUEST_ERROR, - "%s: Pending registers are read only\n", __func__); - } else if (addr_between(addr, s->source_base, s->source_num)) { - qemu_log_mask(LOG_UNIMP, - "%s: Interrupt source mode not supported\n", __func__); - } else if (addr_between(addr, s->priority_base, s->priority_num)) { - uint32_t irq = ((addr - s->priority_base) >> 2) + 1; - s->priority[irq] = value & 7; - ibex_plic_update(s); - } else if (addr_between(addr, s->enable_base, s->enable_num)) { - uint32_t enable_reg = (addr - s->enable_base) / 4; - - s->enable[enable_reg] = value; - } else if (addr_between(addr, s->threshold_base, 1)) { - s->threshold = value & 3; - } else if (addr_between(addr, s->claim_base, 1)) { - if (s->claim == value) { - /* Interrupt was completed */ - s->claim = 0; - } - if (s->claimed[value / 32] & 1 << (value % 32)) { - int pending_num = value / 32; - - /* This value was already claimed, clear it. 
*/ - s->claimed[pending_num] &= ~(1 << (value % 32)); - - if (s->hidden_pending[pending_num] & (1 << (value % 32))) { - /* - * If the bit in hidden_pending is set then that means we - * received an interrupt between claiming and completing - * the interrupt that hasn't since been de-asserted. - * On hardware this would trigger an interrupt, so let's - * trigger one here as well. - */ - s->pending[pending_num] |= 1 << (value % 32); - } - } - } - - ibex_plic_update(s); -} - -static const MemoryRegionOps ibex_plic_ops = { - .read = ibex_plic_read, - .write = ibex_plic_write, - .endianness = DEVICE_NATIVE_ENDIAN, - .valid = { - .min_access_size = 4, - .max_access_size = 4 - } -}; - -static void ibex_plic_irq_request(void *opaque, int irq, int level) -{ - IbexPlicState *s = opaque; - - ibex_plic_irqs_set_pending(s, irq, level > 0); - ibex_plic_update(s); -} - -static Property ibex_plic_properties[] = { - DEFINE_PROP_UINT32("num-cpus", IbexPlicState, num_cpus, 1), - DEFINE_PROP_UINT32("num-sources", IbexPlicState, num_sources, 176), - - DEFINE_PROP_UINT32("pending-base", IbexPlicState, pending_base, 0), - DEFINE_PROP_UINT32("pending-num", IbexPlicState, pending_num, 6), - - DEFINE_PROP_UINT32("source-base", IbexPlicState, source_base, 0x18), - DEFINE_PROP_UINT32("source-num", IbexPlicState, source_num, 6), - - DEFINE_PROP_UINT32("priority-base", IbexPlicState, priority_base, 0x30), - DEFINE_PROP_UINT32("priority-num", IbexPlicState, priority_num, 177), - - DEFINE_PROP_UINT32("enable-base", IbexPlicState, enable_base, 0x300), - DEFINE_PROP_UINT32("enable-num", IbexPlicState, enable_num, 6), - - DEFINE_PROP_UINT32("threshold-base", IbexPlicState, threshold_base, 0x318), - - DEFINE_PROP_UINT32("claim-base", IbexPlicState, claim_base, 0x31c), - DEFINE_PROP_END_OF_LIST(), -}; - -static void ibex_plic_init(Object *obj) -{ - IbexPlicState *s = IBEX_PLIC(obj); - - memory_region_init_io(&s->mmio, obj, &ibex_plic_ops, s, - TYPE_IBEX_PLIC, 0x400); - sysbus_init_mmio(SYS_BUS_DEVICE(obj), &s->mmio); -} - -static void ibex_plic_realize(DeviceState *dev, Error **errp) -{ - IbexPlicState *s = IBEX_PLIC(dev); - int i; - - s->pending = g_new0(uint32_t, s->pending_num); - s->hidden_pending = g_new0(uint32_t, s->pending_num); - s->claimed = g_new0(uint32_t, s->pending_num); - s->source = g_new0(uint32_t, s->source_num); - s->priority = g_new0(uint32_t, s->priority_num); - s->enable = g_new0(uint32_t, s->enable_num); - - qdev_init_gpio_in(dev, ibex_plic_irq_request, s->num_sources); - - s->external_irqs = g_malloc(sizeof(qemu_irq) * s->num_cpus); - qdev_init_gpio_out(dev, s->external_irqs, s->num_cpus); - - /* - * We can't allow the supervisor to control SEIP as this would allow the - * supervisor to clear a pending external interrupt which will result in - * a lost interrupt in the case a PLIC is attached. The SEIP bit must be - * hardware controlled when a PLIC is attached. 
- */ - MachineState *ms = MACHINE(qdev_get_machine()); - unsigned int smp_cpus = ms->smp.cpus; - for (i = 0; i < smp_cpus; i++) { - RISCVCPU *cpu = RISCV_CPU(qemu_get_cpu(i)); - if (riscv_cpu_claim_interrupts(cpu, MIP_SEIP) < 0) { - error_report("SEIP already claimed"); - exit(1); - } - } - - msi_nonbroken = true; -} - -static void ibex_plic_class_init(ObjectClass *klass, void *data) -{ - DeviceClass *dc = DEVICE_CLASS(klass); - - dc->reset = ibex_plic_reset; - device_class_set_props(dc, ibex_plic_properties); - dc->realize = ibex_plic_realize; -} - -static const TypeInfo ibex_plic_info = { - .name = TYPE_IBEX_PLIC, - .parent = TYPE_SYS_BUS_DEVICE, - .instance_size = sizeof(IbexPlicState), - .instance_init = ibex_plic_init, - .class_init = ibex_plic_class_init, -}; - -static void ibex_plic_register_types(void) -{ - type_register_static(&ibex_plic_info); -} - -type_init(ibex_plic_register_types) diff --git a/hw/intc/m68k_irqc.c b/hw/intc/m68k_irqc.c index 2133d2a698..0c515e4ecb 100644 --- a/hw/intc/m68k_irqc.c +++ b/hw/intc/m68k_irqc.c @@ -1,5 +1,5 @@ /* - * SPDX-License-Identifer: GPL-2.0-or-later + * SPDX-License-Identifier: GPL-2.0-or-later * * QEMU Motorola 680x0 IRQ Controller * diff --git a/hw/intc/meson.build b/hw/intc/meson.build index a1d00aa48d..c89d2ca180 100644 --- a/hw/intc/meson.build +++ b/hw/intc/meson.build @@ -32,7 +32,6 @@ specific_ss.add(when: 'CONFIG_ARM_V7M', if_true: files('armv7m_nvic.c')) specific_ss.add(when: 'CONFIG_ASPEED_SOC', if_true: files('aspeed_vic.c')) specific_ss.add(when: 'CONFIG_EXYNOS4', if_true: files('exynos4210_gic.c', 'exynos4210_combiner.c')) specific_ss.add(when: 'CONFIG_GRLIB', if_true: files('grlib_irqmp.c')) -specific_ss.add(when: 'CONFIG_IBEX', if_true: files('ibex_plic.c')) specific_ss.add(when: 'CONFIG_IOAPIC', if_true: files('ioapic.c')) specific_ss.add(when: 'CONFIG_LOONGSON_LIOINTC', if_true: files('loongson_liointc.c')) specific_ss.add(when: 'CONFIG_MIPS_CPS', if_true: files('mips_gic.c')) diff --git a/hw/intc/openpic.c b/hw/intc/openpic.c index 9b4c17854d..49504e740f 100644 --- a/hw/intc/openpic.c +++ b/hw/intc/openpic.c @@ -25,12 +25,8 @@ /* * * Based on OpenPic implementations: - * - Intel GW80314 I/O companion chip developer's manual * - Motorola MPC8245 & MPC8540 user manuals. - * - Motorola MCP750 (aka Raven) programmer manual. - * - Motorola Harrier programmer manuel - * - * Serial interrupts, as implemented in Raven chipset are not supported yet. + * - Motorola Harrier programmer manual * */ @@ -51,7 +47,7 @@ #include "qemu/timer.h" #include "qemu/error-report.h" -//#define DEBUG_OPENPIC +/* #define DEBUG_OPENPIC */ #ifdef DEBUG_OPENPIC static const int debug_openpic = 1; @@ -122,7 +118,8 @@ static FslMpicInfo fsl_mpic_42 = { #define ILR_INTTGT_CINT 0x01 /* critical */ #define ILR_INTTGT_MCP 0x02 /* machine check */ -/* The currently supported INTTGT values happen to be the same as QEMU's +/* + * The currently supported INTTGT values happen to be the same as QEMU's * openpic output codes, but don't depend on this. The output codes * could change (unlikely, but...) or support could be added for * more INTTGT values. @@ -181,10 +178,11 @@ static void openpic_cpu_write_internal(void *opaque, hwaddr addr, uint32_t val, int idx); static void openpic_reset(DeviceState *d); -/* Convert between openpic clock ticks and nanosecs. In the hardware the clock - frequency is driven by board inputs to the PIC which the PIC would then - divide by 4 or 8. For now hard code to 25MZ. -*/ +/* + * Convert between openpic clock ticks and nanosecs. 
In the hardware the clock + * frequency is driven by board inputs to the PIC which the PIC would then + * divide by 4 or 8. For now hard code to 25MZ. + */ #define OPENPIC_TIMER_FREQ_MHZ 25 #define OPENPIC_TIMER_NS_PER_TICK (1000 / OPENPIC_TIMER_FREQ_MHZ) static inline uint64_t ns_to_ticks(uint64_t ns) @@ -257,7 +255,8 @@ static void IRQ_local_pipe(OpenPICState *opp, int n_CPU, int n_IRQ, __func__, src->output, n_IRQ, active, was_active, dst->outputs_active[src->output]); - /* On Freescale MPIC, critical interrupts ignore priority, + /* + * On Freescale MPIC, critical interrupts ignore priority, * IACK, EOI, etc. Before MPIC v4.1 they also ignore * masking. */ @@ -280,7 +279,8 @@ static void IRQ_local_pipe(OpenPICState *opp, int n_CPU, int n_IRQ, priority = IVPR_PRIORITY(src->ivpr); - /* Even if the interrupt doesn't have enough priority, + /* + * Even if the interrupt doesn't have enough priority, * it is still raised, in case ctpr is lowered later. */ if (active) { @@ -412,7 +412,8 @@ static void openpic_set_irq(void *opaque, int n_IRQ, int level) } if (src->output != OPENPIC_OUTPUT_INT) { - /* Edge-triggered interrupts shouldn't be used + /* + * Edge-triggered interrupts shouldn't be used * with non-INT delivery, but just in case, * try to make it do something sane rather than * cause an interrupt storm. This is close to @@ -505,7 +506,8 @@ static inline void write_IRQreg_ivpr(OpenPICState *opp, int n_IRQ, uint32_t val) { uint32_t mask; - /* NOTE when implementing newer FSL MPIC models: starting with v4.0, + /* + * NOTE when implementing newer FSL MPIC models: starting with v4.0, * the polarity bit is read-only on internal interrupts. */ mask = IVPR_MASK_MASK | IVPR_PRIORITY_MASK | IVPR_SENSE_MASK | @@ -515,7 +517,8 @@ static inline void write_IRQreg_ivpr(OpenPICState *opp, int n_IRQ, uint32_t val) opp->src[n_IRQ].ivpr = (opp->src[n_IRQ].ivpr & IVPR_ACTIVITY_MASK) | (val & mask); - /* For FSL internal interrupts, The sense bit is reserved and zero, + /* + * For FSL internal interrupts, The sense bit is reserved and zero, * and the interrupt is always level-triggered. Timers and IPIs * have no sense or polarity bits, and are edge-triggered. */ @@ -699,16 +702,20 @@ static void qemu_timer_cb(void *opaque) openpic_set_irq(opp, n_IRQ, 0); } -/* If enabled is true, arranges for an interrupt to be raised val clocks into - the future, if enabled is false cancels the timer. */ +/* + * If enabled is true, arranges for an interrupt to be raised val clocks into + * the future, if enabled is false cancels the timer. + */ static void openpic_tmr_set_tmr(OpenPICTimer *tmr, uint32_t val, bool enabled) { uint64_t ns = ticks_to_ns(val & ~TCCR_TOG); - /* A count of zero causes a timer to be set to expire immediately. This - effectively stops the simulation since the timer is constantly expiring - which prevents guest code execution, so we don't honor that - configuration. On real hardware, this situation would generate an - interrupt on every clock cycle if the interrupt was unmasked. */ + /* + * A count of zero causes a timer to be set to expire immediately. This + * effectively stops the simulation since the timer is constantly expiring + * which prevents guest code execution, so we don't honor that + * configuration. On real hardware, this situation would generate an + * interrupt on every clock cycle if the interrupt was unmasked. 
+ */ if ((ns == 0) || !enabled) { tmr->qemu_timer_active = false; tmr->tccr = tmr->tccr & TCCR_TOG; @@ -721,8 +728,10 @@ static void openpic_tmr_set_tmr(OpenPICTimer *tmr, uint32_t val, bool enabled) } } -/* Returns the currrent tccr value, i.e., timer value (in clocks) with - appropriate TOG. */ +/* + * Returns the currrent tccr value, i.e., timer value (in clocks) with + * appropriate TOG. + */ static uint64_t openpic_tmr_get_timer(OpenPICTimer *tmr) { uint64_t retval; @@ -1276,6 +1285,15 @@ static void openpic_reset(DeviceState *d) break; } + /* Mask all IPI interrupts for Freescale OpenPIC */ + if ((opp->model == OPENPIC_MODEL_FSL_MPIC_20) || + (opp->model == OPENPIC_MODEL_FSL_MPIC_42)) { + if (i >= opp->irq_ipi0 && i < opp->irq_tim0) { + write_IRQreg_idr(opp, i, 0); + continue; + } + } + write_IRQreg_idr(opp, i, opp->idr_reset); } /* Initialise IRQ destinations */ @@ -1304,7 +1322,7 @@ static void openpic_reset(DeviceState *d) typedef struct MemReg { const char *name; MemoryRegionOps const *ops; - hwaddr start_addr; + hwaddr start_addr; ram_addr_t size; } MemReg; @@ -1555,28 +1573,6 @@ static void openpic_realize(DeviceState *dev, Error **errp) break; - case OPENPIC_MODEL_RAVEN: - opp->nb_irqs = RAVEN_MAX_EXT; - opp->vid = VID_REVISION_1_3; - opp->vir = VIR_GENERIC; - opp->vector_mask = 0xFF; - opp->tfrr_reset = 4160000; - opp->ivpr_reset = IVPR_MASK_MASK | IVPR_MODE_MASK; - opp->idr_reset = 0; - opp->max_irq = RAVEN_MAX_IRQ; - opp->irq_ipi0 = RAVEN_IPI_IRQ; - opp->irq_tim0 = RAVEN_TMR_IRQ; - opp->brr1 = -1; - opp->mpic_mode_mask = GCR_MODE_MIXED; - - if (opp->nb_cpus != 1) { - error_setg(errp, "Only UP supported today"); - return; - } - - map_list(opp, list_le, &list_count); - break; - case OPENPIC_MODEL_KEYLARGO: opp->nb_irqs = KEYLARGO_MAX_EXT; opp->vid = VID_REVISION_1_2; diff --git a/hw/intc/openpic_kvm.c b/hw/intc/openpic_kvm.c index 21da680389..557dd0c2bf 100644 --- a/hw/intc/openpic_kvm.c +++ b/hw/intc/openpic_kvm.c @@ -234,6 +234,7 @@ static void kvm_openpic_realize(DeviceState *dev, Error **errp) opp->mem_listener.region_add = kvm_openpic_region_add; opp->mem_listener.region_del = kvm_openpic_region_del; + opp->mem_listener.name = "openpic-kvm"; memory_listener_register(&opp->mem_listener, &address_space_memory); /* indicate pic capabilities */ diff --git a/hw/intc/sh_intc.c b/hw/intc/sh_intc.c index 72a55e32dd..c9b0b0c1ec 100644 --- a/hw/intc/sh_intc.c +++ b/hw/intc/sh_intc.c @@ -9,40 +9,37 @@ */ #include "qemu/osdep.h" +#include "qemu/log.h" #include "cpu.h" #include "hw/sh4/sh_intc.h" #include "hw/irq.h" #include "hw/sh4/sh.h" - -//#define DEBUG_INTC -//#define DEBUG_INTC_SOURCES - -#define INTC_A7(x) ((x) & 0x1fffffff) +#include "trace.h" void sh_intc_toggle_source(struct intc_source *source, - int enable_adj, int assert_adj) + int enable_adj, int assert_adj) { int enable_changed = 0; int pending_changed = 0; int old_pending; - if ((source->enable_count == source->enable_max) && (enable_adj == -1)) + if (source->enable_count == source->enable_max && enable_adj == -1) { enable_changed = -1; - + } source->enable_count += enable_adj; - if (source->enable_count == source->enable_max) + if (source->enable_count == source->enable_max) { enable_changed = 1; - + } source->asserted += assert_adj; old_pending = source->pending; source->pending = source->asserted && (source->enable_count == source->enable_max); - if (old_pending != source->pending) + if (old_pending != source->pending) { pending_changed = 1; - + } if (pending_changed) { if (source->pending) { 
source->parent->pending++; @@ -54,35 +51,30 @@ void sh_intc_toggle_source(struct intc_source *source, if (source->parent->pending == 0) { cpu_reset_interrupt(first_cpu, CPU_INTERRUPT_HARD); } - } + } } - if (enable_changed || assert_adj || pending_changed) { -#ifdef DEBUG_INTC_SOURCES - printf("sh_intc: (%d/%d/%d/%d) interrupt source 0x%x %s%s%s\n", - source->parent->pending, - source->asserted, - source->enable_count, - source->enable_max, - source->vect, - source->asserted ? "asserted " : - assert_adj ? "deasserted" : "", - enable_changed == 1 ? "enabled " : - enable_changed == -1 ? "disabled " : "", - source->pending ? "pending" : ""); -#endif - } + if (enable_changed || assert_adj || pending_changed) { + trace_sh_intc_sources(source->parent->pending, source->asserted, + source->enable_count, source->enable_max, + source->vect, source->asserted ? "asserted " : + assert_adj ? "deasserted" : "", + enable_changed == 1 ? "enabled " : + enable_changed == -1 ? "disabled " : "", + source->pending ? "pending" : ""); + } } -static void sh_intc_set_irq (void *opaque, int n, int level) +static void sh_intc_set_irq(void *opaque, int n, int level) { - struct intc_desc *desc = opaque; - struct intc_source *source = &(desc->sources[n]); + struct intc_desc *desc = opaque; + struct intc_source *source = &desc->sources[n]; - if (level && !source->asserted) - sh_intc_toggle_source(source, 0, 1); - else if (!level && source->asserted) - sh_intc_toggle_source(source, 0, -1); + if (level && !source->asserted) { + sh_intc_toggle_source(source, 0, 1); + } else if (!level && source->asserted) { + sh_intc_toggle_source(source, 0, -1); + } } int sh_intc_get_pending_vector(struct intc_desc *desc, int imask) @@ -97,147 +89,124 @@ int sh_intc_get_pending_vector(struct intc_desc *desc, int imask) } for (i = 0; i < desc->nr_sources; i++) { - struct intc_source *source = desc->sources + i; + struct intc_source *source = &desc->sources[i]; - if (source->pending) { -#ifdef DEBUG_INTC_SOURCES - printf("sh_intc: (%d) returning interrupt source 0x%x\n", - desc->pending, source->vect); -#endif + if (source->pending) { + trace_sh_intc_pending(desc->pending, source->vect); return source->vect; - } + } } - - abort(); + g_assert_not_reached(); } -#define INTC_MODE_NONE 0 -#define INTC_MODE_DUAL_SET 1 -#define INTC_MODE_DUAL_CLR 2 -#define INTC_MODE_ENABLE_REG 3 -#define INTC_MODE_MASK_REG 4 -#define INTC_MODE_IS_PRIO 8 +typedef enum { + INTC_MODE_NONE, + INTC_MODE_DUAL_SET, + INTC_MODE_DUAL_CLR, + INTC_MODE_ENABLE_REG, + INTC_MODE_MASK_REG, +} SHIntCMode; +#define INTC_MODE_IS_PRIO 0x80 -static unsigned int sh_intc_mode(unsigned long address, - unsigned long set_reg, unsigned long clr_reg) +static SHIntCMode sh_intc_mode(unsigned long address, unsigned long set_reg, + unsigned long clr_reg) { - if ((address != INTC_A7(set_reg)) && - (address != INTC_A7(clr_reg))) + if (address != A7ADDR(set_reg) && address != A7ADDR(clr_reg)) { return INTC_MODE_NONE; - - if (set_reg && clr_reg) { - if (address == INTC_A7(set_reg)) - return INTC_MODE_DUAL_SET; - else - return INTC_MODE_DUAL_CLR; } - - if (set_reg) - return INTC_MODE_ENABLE_REG; - else - return INTC_MODE_MASK_REG; + if (set_reg && clr_reg) { + return address == A7ADDR(set_reg) ? + INTC_MODE_DUAL_SET : INTC_MODE_DUAL_CLR; + } + return set_reg ? 
INTC_MODE_ENABLE_REG : INTC_MODE_MASK_REG; } static void sh_intc_locate(struct intc_desc *desc, - unsigned long address, - unsigned long **datap, - intc_enum **enums, - unsigned int *first, - unsigned int *width, - unsigned int *modep) + unsigned long address, + unsigned long **datap, + intc_enum **enums, + unsigned int *first, + unsigned int *width, + unsigned int *modep) { - unsigned int i, mode; + SHIntCMode mode; + unsigned int i; /* this is slow but works for now */ if (desc->mask_regs) { for (i = 0; i < desc->nr_mask_regs; i++) { - struct intc_mask_reg *mr = desc->mask_regs + i; + struct intc_mask_reg *mr = &desc->mask_regs[i]; - mode = sh_intc_mode(address, mr->set_reg, mr->clr_reg); - if (mode == INTC_MODE_NONE) - continue; - - *modep = mode; - *datap = &mr->value; - *enums = mr->enum_ids; - *first = mr->reg_width - 1; - *width = 1; - return; - } + mode = sh_intc_mode(address, mr->set_reg, mr->clr_reg); + if (mode != INTC_MODE_NONE) { + *modep = mode; + *datap = &mr->value; + *enums = mr->enum_ids; + *first = mr->reg_width - 1; + *width = 1; + return; + } + } } if (desc->prio_regs) { for (i = 0; i < desc->nr_prio_regs; i++) { - struct intc_prio_reg *pr = desc->prio_regs + i; + struct intc_prio_reg *pr = &desc->prio_regs[i]; - mode = sh_intc_mode(address, pr->set_reg, pr->clr_reg); - if (mode == INTC_MODE_NONE) - continue; - - *modep = mode | INTC_MODE_IS_PRIO; - *datap = &pr->value; - *enums = pr->enum_ids; - *first = (pr->reg_width / pr->field_width) - 1; - *width = pr->field_width; - return; - } + mode = sh_intc_mode(address, pr->set_reg, pr->clr_reg); + if (mode != INTC_MODE_NONE) { + *modep = mode | INTC_MODE_IS_PRIO; + *datap = &pr->value; + *enums = pr->enum_ids; + *first = pr->reg_width / pr->field_width - 1; + *width = pr->field_width; + return; + } + } } - - abort(); + g_assert_not_reached(); } static void sh_intc_toggle_mask(struct intc_desc *desc, intc_enum id, - int enable, int is_group) + int enable, int is_group) { - struct intc_source *source = desc->sources + id; - - if (!id) - return; + struct intc_source *source = &desc->sources[id]; + if (!id) { + return; + } if (!source->next_enum_id && (!source->enable_max || !source->vect)) { -#ifdef DEBUG_INTC_SOURCES - printf("sh_intc: reserved interrupt source %d modified\n", id); -#endif - return; + qemu_log_mask(LOG_UNIMP, + "sh_intc: reserved interrupt source %d modified\n", id); + return; } - if (source->vect) + if (source->vect) { sh_intc_toggle_source(source, enable ? 
1 : -1, 0); - -#ifdef DEBUG_INTC - else { - printf("setting interrupt group %d to %d\n", id, !!enable); } -#endif if ((is_group || !source->vect) && source->next_enum_id) { sh_intc_toggle_mask(desc, source->next_enum_id, enable, 1); } -#ifdef DEBUG_INTC if (!source->vect) { - printf("setting interrupt group %d to %d - done\n", id, !!enable); + trace_sh_intc_set(id, !!enable); } -#endif } -static uint64_t sh_intc_read(void *opaque, hwaddr offset, - unsigned size) +static uint64_t sh_intc_read(void *opaque, hwaddr offset, unsigned size) { struct intc_desc *desc = opaque; - intc_enum *enum_ids = NULL; - unsigned int first = 0; - unsigned int width = 0; - unsigned int mode = 0; + intc_enum *enum_ids; + unsigned int first; + unsigned int width; + unsigned int mode; unsigned long *valuep; -#ifdef DEBUG_INTC - printf("sh_intc_read 0x%lx\n", (unsigned long) offset); -#endif - - sh_intc_locate(desc, (unsigned long)offset, &valuep, - &enum_ids, &first, &width, &mode); + sh_intc_locate(desc, (unsigned long)offset, &valuep, + &enum_ids, &first, &width, &mode); + trace_sh_intc_read(size, (uint64_t)offset, *valuep); return *valuep; } @@ -245,45 +214,40 @@ static void sh_intc_write(void *opaque, hwaddr offset, uint64_t value, unsigned size) { struct intc_desc *desc = opaque; - intc_enum *enum_ids = NULL; - unsigned int first = 0; - unsigned int width = 0; - unsigned int mode = 0; - unsigned int k; + intc_enum *enum_ids; + unsigned int first; + unsigned int width; + unsigned int mode; unsigned long *valuep; + unsigned int k; unsigned long mask; -#ifdef DEBUG_INTC - printf("sh_intc_write 0x%lx 0x%08x\n", (unsigned long) offset, value); -#endif - - sh_intc_locate(desc, (unsigned long)offset, &valuep, - &enum_ids, &first, &width, &mode); - + trace_sh_intc_write(size, (uint64_t)offset, value); + sh_intc_locate(desc, (unsigned long)offset, &valuep, + &enum_ids, &first, &width, &mode); switch (mode) { - case INTC_MODE_ENABLE_REG | INTC_MODE_IS_PRIO: break; - case INTC_MODE_DUAL_SET: value |= *valuep; break; - case INTC_MODE_DUAL_CLR: value = *valuep & ~value; break; - default: abort(); + case INTC_MODE_ENABLE_REG | INTC_MODE_IS_PRIO: + break; + case INTC_MODE_DUAL_SET: + value |= *valuep; + break; + case INTC_MODE_DUAL_CLR: + value = *valuep & ~value; + break; + default: + g_assert_not_reached(); } for (k = 0; k <= first; k++) { - mask = ((1 << width) - 1) << ((first - k) * width); + mask = (1 << width) - 1; + mask <<= (first - k) * width; - if ((*valuep & mask) == (value & mask)) - continue; -#if 0 - printf("k = %d, first = %d, enum = %d, mask = 0x%08x\n", - k, first, enum_ids[k], (unsigned int)mask); -#endif - sh_intc_toggle_mask(desc, enum_ids[k], value & mask, 0); + if ((*valuep & mask) != (value & mask)) { + sh_intc_toggle_mask(desc, enum_ids[k], value & mask, 0); + } } *valuep = value; - -#ifdef DEBUG_INTC - printf("sh_intc_write 0x%lx -> 0x%08x\n", (unsigned long) offset, value); -#endif } static const MemoryRegionOps sh_intc_ops = { @@ -292,20 +256,105 @@ static const MemoryRegionOps sh_intc_ops = { .endianness = DEVICE_NATIVE_ENDIAN, }; -struct intc_source *sh_intc_source(struct intc_desc *desc, intc_enum id) +static void sh_intc_register_source(struct intc_desc *desc, + intc_enum source, + struct intc_group *groups, + int nr_groups) { - if (id) - return desc->sources + id; + unsigned int i, k; + intc_enum id; - return NULL; + if (desc->mask_regs) { + for (i = 0; i < desc->nr_mask_regs; i++) { + struct intc_mask_reg *mr = &desc->mask_regs[i]; + + for (k = 0; k < ARRAY_SIZE(mr->enum_ids); k++) { + id = 
mr->enum_ids[k]; + if (id && id == source) { + desc->sources[id].enable_max++; + } + } + } + } + + if (desc->prio_regs) { + for (i = 0; i < desc->nr_prio_regs; i++) { + struct intc_prio_reg *pr = &desc->prio_regs[i]; + + for (k = 0; k < ARRAY_SIZE(pr->enum_ids); k++) { + id = pr->enum_ids[k]; + if (id && id == source) { + desc->sources[id].enable_max++; + } + } + } + } + + if (groups) { + for (i = 0; i < nr_groups; i++) { + struct intc_group *gr = &groups[i]; + + for (k = 0; k < ARRAY_SIZE(gr->enum_ids); k++) { + id = gr->enum_ids[k]; + if (id && id == source) { + desc->sources[id].enable_max++; + } + } + } + } + +} + +void sh_intc_register_sources(struct intc_desc *desc, + struct intc_vect *vectors, + int nr_vectors, + struct intc_group *groups, + int nr_groups) +{ + unsigned int i, k; + intc_enum id; + struct intc_source *s; + + for (i = 0; i < nr_vectors; i++) { + struct intc_vect *vect = &vectors[i]; + + sh_intc_register_source(desc, vect->enum_id, groups, nr_groups); + id = vect->enum_id; + if (id) { + s = &desc->sources[id]; + s->vect = vect->vect; + trace_sh_intc_register("source", vect->enum_id, s->vect, + s->enable_count, s->enable_max); + } + } + + if (groups) { + for (i = 0; i < nr_groups; i++) { + struct intc_group *gr = &groups[i]; + + id = gr->enum_id; + s = &desc->sources[id]; + s->next_enum_id = gr->enum_ids[0]; + + for (k = 1; k < ARRAY_SIZE(gr->enum_ids); k++) { + if (gr->enum_ids[k]) { + id = gr->enum_ids[k - 1]; + s = &desc->sources[id]; + s->next_enum_id = gr->enum_ids[k]; + } + } + trace_sh_intc_register("group", gr->enum_id, 0xffff, + s->enable_count, s->enable_max); + } + } } static unsigned int sh_intc_register(MemoryRegion *sysmem, - struct intc_desc *desc, - const unsigned long address, - const char *type, - const char *action, - const unsigned int index) + struct intc_desc *desc, + const unsigned long address, + const char *type, + const char *action, + const unsigned int index) { char name[60]; MemoryRegion *iomem, *iomem_p4, *iomem_a7; @@ -315,132 +364,28 @@ static unsigned int sh_intc_register(MemoryRegion *sysmem, } iomem = &desc->iomem; - iomem_p4 = desc->iomem_aliases + index; + iomem_p4 = &desc->iomem_aliases[index]; iomem_a7 = iomem_p4 + 1; -#define SH_INTC_IOMEM_FORMAT "interrupt-controller-%s-%s-%s" - snprintf(name, sizeof(name), SH_INTC_IOMEM_FORMAT, type, action, "p4"); - memory_region_init_alias(iomem_p4, NULL, name, iomem, INTC_A7(address), 4); + snprintf(name, sizeof(name), "intc-%s-%s-%s", type, action, "p4"); + memory_region_init_alias(iomem_p4, NULL, name, iomem, A7ADDR(address), 4); memory_region_add_subregion(sysmem, P4ADDR(address), iomem_p4); - snprintf(name, sizeof(name), SH_INTC_IOMEM_FORMAT, type, action, "a7"); - memory_region_init_alias(iomem_a7, NULL, name, iomem, INTC_A7(address), 4); + snprintf(name, sizeof(name), "intc-%s-%s-%s", type, action, "a7"); + memory_region_init_alias(iomem_a7, NULL, name, iomem, A7ADDR(address), 4); memory_region_add_subregion(sysmem, A7ADDR(address), iomem_a7); -#undef SH_INTC_IOMEM_FORMAT /* used to increment aliases index */ return 2; } -static void sh_intc_register_source(struct intc_desc *desc, - intc_enum source, - struct intc_group *groups, - int nr_groups) -{ - unsigned int i, k; - struct intc_source *s; - - if (desc->mask_regs) { - for (i = 0; i < desc->nr_mask_regs; i++) { - struct intc_mask_reg *mr = desc->mask_regs + i; - - for (k = 0; k < ARRAY_SIZE(mr->enum_ids); k++) { - if (mr->enum_ids[k] != source) - continue; - - s = sh_intc_source(desc, mr->enum_ids[k]); - if (s) - s->enable_max++; - } 
- } - } - - if (desc->prio_regs) { - for (i = 0; i < desc->nr_prio_regs; i++) { - struct intc_prio_reg *pr = desc->prio_regs + i; - - for (k = 0; k < ARRAY_SIZE(pr->enum_ids); k++) { - if (pr->enum_ids[k] != source) - continue; - - s = sh_intc_source(desc, pr->enum_ids[k]); - if (s) - s->enable_max++; - } - } - } - - if (groups) { - for (i = 0; i < nr_groups; i++) { - struct intc_group *gr = groups + i; - - for (k = 0; k < ARRAY_SIZE(gr->enum_ids); k++) { - if (gr->enum_ids[k] != source) - continue; - - s = sh_intc_source(desc, gr->enum_ids[k]); - if (s) - s->enable_max++; - } - } - } - -} - -void sh_intc_register_sources(struct intc_desc *desc, - struct intc_vect *vectors, - int nr_vectors, - struct intc_group *groups, - int nr_groups) -{ - unsigned int i, k; - struct intc_source *s; - - for (i = 0; i < nr_vectors; i++) { - struct intc_vect *vect = vectors + i; - - sh_intc_register_source(desc, vect->enum_id, groups, nr_groups); - s = sh_intc_source(desc, vect->enum_id); - if (s) { - s->vect = vect->vect; - -#ifdef DEBUG_INTC_SOURCES - printf("sh_intc: registered source %d -> 0x%04x (%d/%d)\n", - vect->enum_id, s->vect, s->enable_count, s->enable_max); -#endif - } - } - - if (groups) { - for (i = 0; i < nr_groups; i++) { - struct intc_group *gr = groups + i; - - s = sh_intc_source(desc, gr->enum_id); - s->next_enum_id = gr->enum_ids[0]; - - for (k = 1; k < ARRAY_SIZE(gr->enum_ids); k++) { - if (!gr->enum_ids[k]) - continue; - - s = sh_intc_source(desc, gr->enum_ids[k - 1]); - s->next_enum_id = gr->enum_ids[k]; - } - -#ifdef DEBUG_INTC_SOURCES - printf("sh_intc: registered group %d (%d/%d)\n", - gr->enum_id, s->enable_count, s->enable_max); -#endif - } - } -} - int sh_intc_init(MemoryRegion *sysmem, - struct intc_desc *desc, - int nr_sources, - struct intc_mask_reg *mask_regs, - int nr_mask_regs, - struct intc_prio_reg *prio_regs, - int nr_prio_regs) + struct intc_desc *desc, + int nr_sources, + struct intc_mask_reg *mask_regs, + int nr_mask_regs, + struct intc_prio_reg *prio_regs, + int nr_prio_regs) { unsigned int i, j; @@ -450,65 +395,55 @@ int sh_intc_init(MemoryRegion *sysmem, desc->nr_mask_regs = nr_mask_regs; desc->prio_regs = prio_regs; desc->nr_prio_regs = nr_prio_regs; - /* Allocate 4 MemoryRegions per register (2 actions * 2 aliases). 
- **/ + /* Allocate 4 MemoryRegions per register (2 actions * 2 aliases) */ desc->iomem_aliases = g_new0(MemoryRegion, (nr_mask_regs + nr_prio_regs) * 4); - - j = 0; - i = sizeof(struct intc_source) * nr_sources; - desc->sources = g_malloc0(i); - - for (i = 0; i < desc->nr_sources; i++) { - struct intc_source *source = desc->sources + i; - - source->parent = desc; + desc->sources = g_new0(struct intc_source, nr_sources); + for (i = 0; i < nr_sources; i++) { + desc->sources[i].parent = desc; } - desc->irqs = qemu_allocate_irqs(sh_intc_set_irq, desc, nr_sources); - - memory_region_init_io(&desc->iomem, NULL, &sh_intc_ops, desc, - "interrupt-controller", 0x100000000ULL); - -#define INT_REG_PARAMS(reg_struct, type, action, j) \ - reg_struct->action##_reg, #type, #action, j + memory_region_init_io(&desc->iomem, NULL, &sh_intc_ops, desc, "intc", + 0x100000000ULL); + j = 0; if (desc->mask_regs) { for (i = 0; i < desc->nr_mask_regs; i++) { - struct intc_mask_reg *mr = desc->mask_regs + i; + struct intc_mask_reg *mr = &desc->mask_regs[i]; - j += sh_intc_register(sysmem, desc, - INT_REG_PARAMS(mr, mask, set, j)); - j += sh_intc_register(sysmem, desc, - INT_REG_PARAMS(mr, mask, clr, j)); - } + j += sh_intc_register(sysmem, desc, mr->set_reg, "mask", "set", j); + j += sh_intc_register(sysmem, desc, mr->clr_reg, "mask", "clr", j); + } } if (desc->prio_regs) { for (i = 0; i < desc->nr_prio_regs; i++) { - struct intc_prio_reg *pr = desc->prio_regs + i; + struct intc_prio_reg *pr = &desc->prio_regs[i]; - j += sh_intc_register(sysmem, desc, - INT_REG_PARAMS(pr, prio, set, j)); - j += sh_intc_register(sysmem, desc, - INT_REG_PARAMS(pr, prio, clr, j)); - } + j += sh_intc_register(sysmem, desc, pr->set_reg, "prio", "set", j); + j += sh_intc_register(sysmem, desc, pr->clr_reg, "prio", "clr", j); + } } -#undef INT_REG_PARAMS return 0; } -/* Assert level IRL interrupt. - 0:deassert. 1:lowest priority,... 15:highest priority. */ +/* + * Assert level IRL interrupt. + * 0:deassert. 1:lowest priority,... 15:highest priority + */ void sh_intc_set_irl(void *opaque, int n, int level) { struct intc_source *s = opaque; int i, irl = level ^ 15; - for (i = 0; (s = sh_intc_source(s->parent, s->next_enum_id)); i++) { - if (i == irl) - sh_intc_toggle_source(s, s->enable_count?0:1, s->asserted?0:1); - else - if (s->asserted) - sh_intc_toggle_source(s, 0, -1); + intc_enum id = s->next_enum_id; + + for (i = 0; id; id = s->next_enum_id, i++) { + s = &s->parent->sources[id]; + if (i == irl) { + sh_intc_toggle_source(s, s->enable_count ? 0 : 1, + s->asserted ? 
0 : 1); + } else if (s->asserted) { + sh_intc_toggle_source(s, 0, -1); + } } } diff --git a/hw/intc/sifive_plic.c b/hw/intc/sifive_plic.c index 9ba36dc0b3..877e76877c 100644 --- a/hw/intc/sifive_plic.c +++ b/hw/intc/sifive_plic.c @@ -355,21 +355,6 @@ static const MemoryRegionOps sifive_plic_ops = { } }; -static Property sifive_plic_properties[] = { - DEFINE_PROP_STRING("hart-config", SiFivePLICState, hart_config), - DEFINE_PROP_UINT32("hartid-base", SiFivePLICState, hartid_base, 0), - DEFINE_PROP_UINT32("num-sources", SiFivePLICState, num_sources, 0), - DEFINE_PROP_UINT32("num-priorities", SiFivePLICState, num_priorities, 0), - DEFINE_PROP_UINT32("priority-base", SiFivePLICState, priority_base, 0), - DEFINE_PROP_UINT32("pending-base", SiFivePLICState, pending_base, 0), - DEFINE_PROP_UINT32("enable-base", SiFivePLICState, enable_base, 0), - DEFINE_PROP_UINT32("enable-stride", SiFivePLICState, enable_stride, 0), - DEFINE_PROP_UINT32("context-base", SiFivePLICState, context_base, 0), - DEFINE_PROP_UINT32("context-stride", SiFivePLICState, context_stride, 0), - DEFINE_PROP_UINT32("aperture-size", SiFivePLICState, aperture_size, 0), - DEFINE_PROP_END_OF_LIST(), -}; - /* * parse PLIC hart/mode address offset config * @@ -427,45 +412,46 @@ static void parse_hart_config(SiFivePLICState *plic) static void sifive_plic_irq_request(void *opaque, int irq, int level) { - SiFivePLICState *plic = opaque; - if (RISCV_DEBUG_PLIC) { - qemu_log("sifive_plic_irq_request: irq=%d level=%d\n", irq, level); - } - sifive_plic_set_pending(plic, irq, level > 0); - sifive_plic_update(plic); + SiFivePLICState *s = opaque; + + sifive_plic_set_pending(s, irq, level > 0); + sifive_plic_update(s); } static void sifive_plic_realize(DeviceState *dev, Error **errp) { - SiFivePLICState *plic = SIFIVE_PLIC(dev); + SiFivePLICState *s = SIFIVE_PLIC(dev); int i; - memory_region_init_io(&plic->mmio, OBJECT(dev), &sifive_plic_ops, plic, - TYPE_SIFIVE_PLIC, plic->aperture_size); - parse_hart_config(plic); - plic->bitfield_words = (plic->num_sources + 31) >> 5; - plic->num_enables = plic->bitfield_words * plic->num_addrs; - plic->source_priority = g_new0(uint32_t, plic->num_sources); - plic->target_priority = g_new(uint32_t, plic->num_addrs); - plic->pending = g_new0(uint32_t, plic->bitfield_words); - plic->claimed = g_new0(uint32_t, plic->bitfield_words); - plic->enable = g_new0(uint32_t, plic->num_enables); - sysbus_init_mmio(SYS_BUS_DEVICE(dev), &plic->mmio); - qdev_init_gpio_in(dev, sifive_plic_irq_request, plic->num_sources); + memory_region_init_io(&s->mmio, OBJECT(dev), &sifive_plic_ops, s, + TYPE_SIFIVE_PLIC, s->aperture_size); + sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->mmio); - plic->s_external_irqs = g_malloc(sizeof(qemu_irq) * plic->num_harts); - qdev_init_gpio_out(dev, plic->s_external_irqs, plic->num_harts); + parse_hart_config(s); - plic->m_external_irqs = g_malloc(sizeof(qemu_irq) * plic->num_harts); - qdev_init_gpio_out(dev, plic->m_external_irqs, plic->num_harts); + s->bitfield_words = (s->num_sources + 31) >> 5; + s->num_enables = s->bitfield_words * s->num_addrs; + s->source_priority = g_new0(uint32_t, s->num_sources); + s->target_priority = g_new(uint32_t, s->num_addrs); + s->pending = g_new0(uint32_t, s->bitfield_words); + s->claimed = g_new0(uint32_t, s->bitfield_words); + s->enable = g_new0(uint32_t, s->num_enables); + + qdev_init_gpio_in(dev, sifive_plic_irq_request, s->num_sources); + + s->s_external_irqs = g_malloc(sizeof(qemu_irq) * s->num_harts); + qdev_init_gpio_out(dev, s->s_external_irqs, s->num_harts); 
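
A note on the bitfield sizing in the realize hunk above: each group of 32 interrupt sources is tracked in one uint32_t word, which is why the code computes bitfield_words = (num_sources + 31) >> 5 and allocates the pending/claimed arrays from that count. The following standalone sketch is not part of the patch and uses made-up names and values; it only illustrates the same word/bit arithmetic that the set-pending helper called from sifive_plic_irq_request() relies on.

/*
 * Illustrative sketch only (not QEMU code): word/bit arithmetic behind
 * bitfield_words = (num_sources + 31) >> 5 and a pending[] bitmap.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>
#include <stdlib.h>

static void bitmap_set(uint32_t *words, int irq, bool level)
{
    uint32_t mask = 1u << (irq & 31);

    if (level) {
        words[irq >> 5] |= mask;     /* mark IRQ pending */
    } else {
        words[irq >> 5] &= ~mask;    /* clear it again */
    }
}

int main(void)
{
    unsigned num_sources = 96;                          /* hypothetical PLIC */
    unsigned bitfield_words = (num_sources + 31) >> 5;  /* -> 3 words */
    uint32_t *pending = calloc(bitfield_words, sizeof(uint32_t));

    bitmap_set(pending, 53, true);                      /* bit 21 of word 1 */
    printf("words=%u, word[1]=0x%08x\n", bitfield_words, pending[1]);
    bitmap_set(pending, 53, false);
    free(pending);
    return 0;
}
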
+ + s->m_external_irqs = g_malloc(sizeof(qemu_irq) * s->num_harts); + qdev_init_gpio_out(dev, s->m_external_irqs, s->num_harts); /* We can't allow the supervisor to control SEIP as this would allow the * supervisor to clear a pending external interrupt which will result in * lost a interrupt in the case a PLIC is attached. The SEIP bit must be * hardware controlled when a PLIC is attached. */ - for (i = 0; i < plic->num_harts; i++) { - RISCVCPU *cpu = RISCV_CPU(qemu_get_cpu(plic->hartid_base + i)); + for (i = 0; i < s->num_harts; i++) { + RISCVCPU *cpu = RISCV_CPU(qemu_get_cpu(s->hartid_base + i)); if (riscv_cpu_claim_interrupts(cpu, MIP_SEIP) < 0) { error_report("SEIP already claimed"); exit(1); @@ -496,6 +482,21 @@ static const VMStateDescription vmstate_sifive_plic = { } }; +static Property sifive_plic_properties[] = { + DEFINE_PROP_STRING("hart-config", SiFivePLICState, hart_config), + DEFINE_PROP_UINT32("hartid-base", SiFivePLICState, hartid_base, 0), + DEFINE_PROP_UINT32("num-sources", SiFivePLICState, num_sources, 0), + DEFINE_PROP_UINT32("num-priorities", SiFivePLICState, num_priorities, 0), + DEFINE_PROP_UINT32("priority-base", SiFivePLICState, priority_base, 0), + DEFINE_PROP_UINT32("pending-base", SiFivePLICState, pending_base, 0), + DEFINE_PROP_UINT32("enable-base", SiFivePLICState, enable_base, 0), + DEFINE_PROP_UINT32("enable-stride", SiFivePLICState, enable_stride, 0), + DEFINE_PROP_UINT32("context-base", SiFivePLICState, context_base, 0), + DEFINE_PROP_UINT32("context-stride", SiFivePLICState, context_stride, 0), + DEFINE_PROP_UINT32("aperture-size", SiFivePLICState, aperture_size, 0), + DEFINE_PROP_END_OF_LIST(), +}; + static void sifive_plic_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); diff --git a/hw/intc/spapr_xive.c b/hw/intc/spapr_xive.c index 89cfa018f5..4ec659b93e 100644 --- a/hw/intc/spapr_xive.c +++ b/hw/intc/spapr_xive.c @@ -185,7 +185,7 @@ static void spapr_xive_pic_print_info(SpaprXive *xive, Monitor *mon) xive_source_irq_is_lsi(xsrc, i) ? "LSI" : "MSI", pq & XIVE_ESB_VAL_P ? 'P' : '-', pq & XIVE_ESB_VAL_Q ? 'Q' : '-', - xsrc->status[i] & XIVE_STATUS_ASSERTED ? 'A' : ' ', + xive_source_is_asserted(xsrc, i) ? 'A' : ' ', xive_eas_is_masked(eas) ? "M" : " ", (int) xive_get_field64(EAS_END_DATA, eas->w)); diff --git a/hw/intc/spapr_xive_kvm.c b/hw/intc/spapr_xive_kvm.c index 3e534b9685..61fe7bd2d3 100644 --- a/hw/intc/spapr_xive_kvm.c +++ b/hw/intc/spapr_xive_kvm.c @@ -236,11 +236,13 @@ int kvmppc_xive_source_reset_one(XiveSource *xsrc, int srcno, Error **errp) SpaprXive *xive = SPAPR_XIVE(xsrc->xive); uint64_t state = 0; + trace_kvm_xive_source_reset(srcno); + assert(xive->fd != -1); if (xive_source_irq_is_lsi(xsrc, srcno)) { state |= KVM_XIVE_LEVEL_SENSITIVE; - if (xsrc->status[srcno] & XIVE_STATUS_ASSERTED) { + if (xive_source_is_asserted(xsrc, srcno)) { state |= KVM_XIVE_LEVEL_ASSERTED; } } @@ -299,9 +301,7 @@ static uint8_t xive_esb_read(XiveSource *xsrc, int srcno, uint32_t offset) static void kvmppc_xive_esb_trigger(XiveSource *xsrc, int srcno) { - uint64_t *addr = xsrc->esb_mmap + xive_source_esb_page(xsrc, srcno); - - *addr = 0x0; + xive_esb_rw(xsrc, srcno, 0, 0, true); } uint64_t kvmppc_xive_esb_rw(XiveSource *xsrc, int srcno, uint32_t offset, @@ -311,8 +311,6 @@ uint64_t kvmppc_xive_esb_rw(XiveSource *xsrc, int srcno, uint32_t offset, return xive_esb_rw(xsrc, srcno, offset, data, 1); } - trace_kvm_xive_source_reset(srcno); - /* * Special Load EOI handling for LSI sources. 
Q bit is never set * and the interrupt should be re-triggered if the level is still @@ -321,7 +319,7 @@ uint64_t kvmppc_xive_esb_rw(XiveSource *xsrc, int srcno, uint32_t offset, if (xive_source_irq_is_lsi(xsrc, srcno) && offset == XIVE_ESB_LOAD_EOI) { xive_esb_read(xsrc, srcno, XIVE_ESB_SET_PQ_00); - if (xsrc->status[srcno] & XIVE_STATUS_ASSERTED) { + if (xive_source_is_asserted(xsrc, srcno)) { kvmppc_xive_esb_trigger(xsrc, srcno); } return 0; @@ -359,11 +357,7 @@ void kvmppc_xive_source_set_irq(void *opaque, int srcno, int val) return; } } else { - if (val) { - xsrc->status[srcno] |= XIVE_STATUS_ASSERTED; - } else { - xsrc->status[srcno] &= ~XIVE_STATUS_ASSERTED; - } + xive_source_set_asserted(xsrc, srcno, val); } kvmppc_xive_esb_trigger(xsrc, srcno); diff --git a/hw/intc/trace-events b/hw/intc/trace-events index 6a17d38998..9aba7e3a7a 100644 --- a/hw/intc/trace-events +++ b/hw/intc/trace-events @@ -238,3 +238,11 @@ goldfish_pic_write(void *dev, int idx, unsigned int addr, unsigned int size, uin goldfish_pic_reset(void *dev, int idx) "pic: %p goldfish-irq.%d" goldfish_pic_realize(void *dev, int idx) "pic: %p goldfish-irq.%d" goldfish_pic_instance_init(void *dev) "pic: %p goldfish-irq" + +# sh_intc.c +sh_intc_sources(int p, int a, int c, int m, unsigned short v, const char *s1, const char *s2, const char *s3) "(%d/%d/%d/%d) interrupt source 0x%x %s%s%s" +sh_intc_pending(int p, unsigned short v) "(%d) returning interrupt source 0x%x" +sh_intc_register(const char *s, int id, unsigned short v, int c, int m) "%s %u -> 0x%04x (%d/%d)" +sh_intc_read(unsigned size, uint64_t offset, unsigned long val) "size %u 0x%" PRIx64 " -> 0x%lx" +sh_intc_write(unsigned size, uint64_t offset, unsigned long val) "size %u 0x%" PRIx64 " <- 0x%lx" +sh_intc_set(int id, int enable) "setting interrupt group %d to %d" diff --git a/hw/intc/xive.c b/hw/intc/xive.c index b817ee8e37..190194d27f 100644 --- a/hw/intc/xive.c +++ b/hw/intc/xive.c @@ -27,17 +27,6 @@ * XIVE Thread Interrupt Management context */ -/* - * Convert a priority number to an Interrupt Pending Buffer (IPB) - * register, which indicates a pending interrupt at the priority - * corresponding to the bit number - */ -static uint8_t priority_to_ipb(uint8_t priority) -{ - return priority > XIVE_PRIORITY_MAX ? 
- 0 : 1 << (XIVE_PRIORITY_MAX - priority); -} - /* * Convert an Interrupt Pending Buffer (IPB) register to a Pending * Interrupt Priority Register (PIPR), which contains the priority of @@ -89,7 +78,7 @@ static uint64_t xive_tctx_accept(XiveTCTX *tctx, uint8_t ring) regs[TM_CPPR] = cppr; /* Reset the pending buffer bit */ - regs[TM_IPB] &= ~priority_to_ipb(cppr); + regs[TM_IPB] &= ~xive_priority_to_ipb(cppr); regs[TM_PIPR] = ipb_to_pipr(regs[TM_IPB]); /* Drop Exception bit */ @@ -152,11 +141,6 @@ void xive_tctx_ipb_update(XiveTCTX *tctx, uint8_t ring, uint8_t ipb) xive_tctx_notify(tctx, ring); } -static inline uint32_t xive_tctx_word2(uint8_t *ring) -{ - return *((uint32_t *) &ring[TM_WORD2]); -} - /* * XIVE Thread Interrupt Management Area (TIMA) */ @@ -353,7 +337,7 @@ static void xive_tm_set_os_cppr(XivePresenter *xptr, XiveTCTX *tctx, static void xive_tm_set_os_pending(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset, uint64_t value, unsigned size) { - xive_tctx_ipb_update(tctx, TM_QW1_OS, priority_to_ipb(value & 0xff)); + xive_tctx_ipb_update(tctx, TM_QW1_OS, xive_priority_to_ipb(value & 0xff)); } static void xive_os_cam_decode(uint32_t cam, uint8_t *nvt_blk, @@ -891,7 +875,7 @@ static bool xive_source_lsi_trigger(XiveSource *xsrc, uint32_t srcno) { uint8_t old_pq = xive_source_esb_get(xsrc, srcno); - xsrc->status[srcno] |= XIVE_STATUS_ASSERTED; + xive_source_set_asserted(xsrc, srcno, true); switch (old_pq) { case XIVE_ESB_RESET: @@ -939,7 +923,7 @@ static bool xive_source_esb_eoi(XiveSource *xsrc, uint32_t srcno) * notification */ if (xive_source_irq_is_lsi(xsrc, srcno) && - xsrc->status[srcno] & XIVE_STATUS_ASSERTED) { + xive_source_is_asserted(xsrc, srcno)) { ret = xive_source_lsi_trigger(xsrc, srcno); } @@ -1120,7 +1104,7 @@ void xive_source_set_irq(void *opaque, int srcno, int val) if (val) { notify = xive_source_lsi_trigger(xsrc, srcno); } else { - xsrc->status[srcno] &= ~XIVE_STATUS_ASSERTED; + xive_source_set_asserted(xsrc, srcno, false); } } else { if (val) { @@ -1149,7 +1133,7 @@ void xive_source_pic_print_info(XiveSource *xsrc, uint32_t offset, Monitor *mon) xive_source_irq_is_lsi(xsrc, i) ? "LSI" : "MSI", pq & XIVE_ESB_VAL_P ? 'P' : '-', pq & XIVE_ESB_VAL_Q ? 'Q' : '-', - xsrc->status[i] & XIVE_STATUS_ASSERTED ? 'A' : ' '); + xive_source_is_asserted(xsrc, i) ? 'A' : ' '); } } @@ -1535,7 +1519,8 @@ bool xive_presenter_notify(XiveFabric *xfb, uint8_t format, /* handle CPU exception delivery */ if (count) { trace_xive_presenter_notify(nvt_blk, nvt_idx, match.ring); - xive_tctx_ipb_update(match.tctx, match.ring, priority_to_ipb(priority)); + xive_tctx_ipb_update(match.tctx, match.ring, + xive_priority_to_ipb(priority)); } return !!count; @@ -1682,7 +1667,8 @@ static void xive_router_end_notify(XiveRouter *xrtr, uint8_t end_blk, * use. The presenter will resend the interrupt when the vCPU * is dispatched again on a HW thread. 
*/ - ipb = xive_get_field32(NVT_W4_IPB, nvt.w4) | priority_to_ipb(priority); + ipb = xive_get_field32(NVT_W4_IPB, nvt.w4) | + xive_priority_to_ipb(priority); nvt.w4 = xive_set_field32(NVT_W4_IPB, nvt.w4, ipb); xive_router_write_nvt(xrtr, nvt_blk, nvt_idx, &nvt, 4); diff --git a/hw/ipack/ipack.c b/hw/ipack/ipack.c index f19ecaeb1c..ae20f36da6 100644 --- a/hw/ipack/ipack.c +++ b/hw/ipack/ipack.c @@ -30,12 +30,12 @@ IPackDevice *ipack_device_find(IPackBus *bus, int32_t slot) return NULL; } -void ipack_bus_new_inplace(IPackBus *bus, size_t bus_size, - DeviceState *parent, - const char *name, uint8_t n_slots, - qemu_irq_handler handler) +void ipack_bus_init(IPackBus *bus, size_t bus_size, + DeviceState *parent, + uint8_t n_slots, + qemu_irq_handler handler) { - qbus_create_inplace(bus, bus_size, TYPE_IPACK_BUS, parent, name); + qbus_init(bus, bus_size, TYPE_IPACK_BUS, parent, NULL); bus->n_slots = n_slots; bus->set_irq = handler; } diff --git a/hw/ipack/tpci200.c b/hw/ipack/tpci200.c index d107e134c4..1f764fc85b 100644 --- a/hw/ipack/tpci200.c +++ b/hw/ipack/tpci200.c @@ -611,8 +611,8 @@ static void tpci200_realize(PCIDevice *pci_dev, Error **errp) pci_register_bar(&s->dev, 4, PCI_BASE_ADDRESS_SPACE_MEMORY, &s->las2); pci_register_bar(&s->dev, 5, PCI_BASE_ADDRESS_SPACE_MEMORY, &s->las3); - ipack_bus_new_inplace(&s->bus, sizeof(s->bus), DEVICE(pci_dev), NULL, - N_MODULES, tpci200_set_irq); + ipack_bus_init(&s->bus, sizeof(s->bus), DEVICE(pci_dev), + N_MODULES, tpci200_set_irq); } static const VMStateDescription vmstate_tpci200 = { diff --git a/hw/isa/isa-bus.c b/hw/isa/isa-bus.c index cffaa35e9c..6c31398dda 100644 --- a/hw/isa/isa-bus.c +++ b/hw/isa/isa-bus.c @@ -64,7 +64,7 @@ ISABus *isa_bus_new(DeviceState *dev, MemoryRegion* address_space, sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); } - isabus = ISA_BUS(qbus_create(TYPE_ISA_BUS, dev, NULL)); + isabus = ISA_BUS(qbus_new(TYPE_ISA_BUS, dev, NULL)); isabus->address_space = address_space; isabus->address_space_io = address_space_io; return isabus; diff --git a/hw/isa/vt82c686.c b/hw/isa/vt82c686.c index f57f3e7067..8f656251b8 100644 --- a/hw/isa/vt82c686.c +++ b/hw/isa/vt82c686.c @@ -542,6 +542,8 @@ OBJECT_DECLARE_SIMPLE_TYPE(ViaISAState, VIA_ISA) struct ViaISAState { PCIDevice dev; qemu_irq cpu_intr; + qemu_irq *isa_irqs; + ISABus *isa_bus; ViaSuperIOState *via_sio; }; @@ -566,12 +568,42 @@ static const TypeInfo via_isa_info = { }, }; +void via_isa_set_irq(PCIDevice *d, int n, int level) +{ + ViaISAState *s = VIA_ISA(d); + qemu_set_irq(s->isa_irqs[n], level); +} + static void via_isa_request_i8259_irq(void *opaque, int irq, int level) { ViaISAState *s = opaque; qemu_set_irq(s->cpu_intr, level); } +static void via_isa_realize(PCIDevice *d, Error **errp) +{ + ViaISAState *s = VIA_ISA(d); + DeviceState *dev = DEVICE(d); + qemu_irq *isa_irq; + int i; + + qdev_init_gpio_out(dev, &s->cpu_intr, 1); + isa_irq = qemu_allocate_irqs(via_isa_request_i8259_irq, s, 1); + s->isa_bus = isa_bus_new(dev, get_system_memory(), pci_address_space_io(d), + &error_fatal); + s->isa_irqs = i8259_init(s->isa_bus, *isa_irq); + isa_bus_irqs(s->isa_bus, s->isa_irqs); + i8254_pit_init(s->isa_bus, 0x40, 0, NULL); + i8257_dma_init(s->isa_bus, 0); + mc146818_rtc_init(s->isa_bus, 2000, NULL); + + for (i = 0; i < PCI_CONFIG_HEADER_SIZE; i++) { + if (i < PCI_COMMAND || i >= PCI_REVISION_ID) { + d->wmask[i] = 0; + } + } +} + /* TYPE_VT82C686B_ISA */ static void vt82c686b_write_config(PCIDevice *d, uint32_t addr, @@ -610,27 +642,10 @@ static void 
vt82c686b_isa_reset(DeviceState *dev) static void vt82c686b_realize(PCIDevice *d, Error **errp) { ViaISAState *s = VIA_ISA(d); - DeviceState *dev = DEVICE(d); - ISABus *isa_bus; - qemu_irq *isa_irq; - int i; - qdev_init_gpio_out(dev, &s->cpu_intr, 1); - isa_irq = qemu_allocate_irqs(via_isa_request_i8259_irq, s, 1); - isa_bus = isa_bus_new(dev, get_system_memory(), pci_address_space_io(d), - &error_fatal); - isa_bus_irqs(isa_bus, i8259_init(isa_bus, *isa_irq)); - i8254_pit_init(isa_bus, 0x40, 0, NULL); - i8257_dma_init(isa_bus, 0); - s->via_sio = VIA_SUPERIO(isa_create_simple(isa_bus, + via_isa_realize(d, errp); + s->via_sio = VIA_SUPERIO(isa_create_simple(s->isa_bus, TYPE_VT82C686B_SUPERIO)); - mc146818_rtc_init(isa_bus, 2000, NULL); - - for (i = 0; i < PCI_CONFIG_HEADER_SIZE; i++) { - if (i < PCI_COMMAND || i >= PCI_REVISION_ID) { - d->wmask[i] = 0; - } - } } static void vt82c686b_class_init(ObjectClass *klass, void *data) @@ -691,26 +706,10 @@ static void vt8231_isa_reset(DeviceState *dev) static void vt8231_realize(PCIDevice *d, Error **errp) { ViaISAState *s = VIA_ISA(d); - DeviceState *dev = DEVICE(d); - ISABus *isa_bus; - qemu_irq *isa_irq; - int i; - qdev_init_gpio_out(dev, &s->cpu_intr, 1); - isa_irq = qemu_allocate_irqs(via_isa_request_i8259_irq, s, 1); - isa_bus = isa_bus_new(dev, get_system_memory(), pci_address_space_io(d), - &error_fatal); - isa_bus_irqs(isa_bus, i8259_init(isa_bus, *isa_irq)); - i8254_pit_init(isa_bus, 0x40, 0, NULL); - i8257_dma_init(isa_bus, 0); - s->via_sio = VIA_SUPERIO(isa_create_simple(isa_bus, TYPE_VT8231_SUPERIO)); - mc146818_rtc_init(isa_bus, 2000, NULL); - - for (i = 0; i < PCI_CONFIG_HEADER_SIZE; i++) { - if (i < PCI_COMMAND || i >= PCI_REVISION_ID) { - d->wmask[i] = 0; - } - } + via_isa_realize(d, errp); + s->via_sio = VIA_SUPERIO(isa_create_simple(s->isa_bus, + TYPE_VT8231_SUPERIO)); } static void vt8231_class_init(ObjectClass *klass, void *data) diff --git a/hw/m68k/q800.c b/hw/m68k/q800.c index 5ba87f789c..e4c7c9b88a 100644 --- a/hw/m68k/q800.c +++ b/hw/m68k/q800.c @@ -28,6 +28,7 @@ #include "cpu.h" #include "hw/boards.h" #include "hw/or-irq.h" +#include "hw/nmi.h" #include "elf.h" #include "hw/loader.h" #include "ui/console.h" @@ -67,9 +68,6 @@ #define ASC_BASE (IO_BASE + 0x14000) #define SWIM_BASE (IO_BASE + 0x1E000) -#define NUBUS_SUPER_SLOT_BASE 0x60000000 -#define NUBUS_SLOT_BASE 0xf0000000 - #define SONIC_PROM_SIZE 0x1000 /* @@ -77,10 +75,17 @@ * is needed by the kernel to have early display and * thus provided by the bootloader */ -#define VIDEO_BASE 0xf9001000 +#define VIDEO_BASE 0xf9000000 #define MAC_CLOCK 3686418 +/* + * Slot 0x9 is reserved for use by the in-built framebuffer whilst only + * slots 0xc, 0xd and 0xe physically exist on the Quadra 800 + */ +#define Q800_NUBUS_SLOTS_AVAILABLE (BIT(0x9) | BIT(0xc) | BIT(0xd) | \ + BIT(0xe)) + /* * The GLUE (General Logic Unit) is an Apple custom integrated circuit chip * that performs a variety of functions (RAM management, clock generation, ...). 
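The Q800_NUBUS_SLOTS_AVAILABLE mask defined just above encodes the usable NuBus slots as one bit per slot number. As a minimal, self-contained illustration (not part of the patch: BIT() is re-declared locally and the q800_slot_available() helper is hypothetical; the slot numbers simply mirror the comment above), the mask can be queried like this:

#include <stdio.h>

#define BIT(n)  (1u << (n))
#define Q800_NUBUS_SLOTS_AVAILABLE (BIT(0x9) | BIT(0xc) | BIT(0xd) | BIT(0xe))

/* Hypothetical helper: non-zero when the given NuBus slot is wired up. */
static int q800_slot_available(unsigned slot)
{
    return (Q800_NUBUS_SLOTS_AVAILABLE & BIT(slot)) != 0;
}

int main(void)
{
    unsigned slot;

    for (slot = 0x9; slot <= 0xe; slot++) {
        printf("slot 0x%x: %s\n", slot,
               q800_slot_available(slot) ? "available" : "not present");
    }
    return 0;
}

Run standalone, this reports slots 0x9, 0xc, 0xd and 0xe as available and 0xa/0xb as not present, matching the Quadra 800 slot layout described in the comment above.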
@@ -96,13 +101,110 @@ struct GLUEState { SysBusDevice parent_obj; M68kCPU *cpu; uint8_t ipr; + uint8_t auxmode; + qemu_irq irqs[1]; + QEMUTimer *nmi_release; }; +#define GLUE_IRQ_IN_VIA1 0 +#define GLUE_IRQ_IN_VIA2 1 +#define GLUE_IRQ_IN_SONIC 2 +#define GLUE_IRQ_IN_ESCC 3 +#define GLUE_IRQ_IN_NMI 4 + +#define GLUE_IRQ_NUBUS_9 0 + +/* + * The GLUE logic on the Quadra 800 supports 2 different IRQ routing modes + * controlled from the VIA1 auxmode GPIO (port B bit 6) which are documented + * in NetBSD as follows: + * + * A/UX mode (Linux, NetBSD, auxmode GPIO low) + * + * Level 0: Spurious: ignored + * Level 1: Software + * Level 2: VIA2 (except ethernet, sound) + * Level 3: Ethernet + * Level 4: Serial (SCC) + * Level 5: Sound + * Level 6: VIA1 + * Level 7: NMIs: parity errors, RESET button, YANCC error + * + * Classic mode (default: used by MacOS, A/UX 3.0.1, auxmode GPIO high) + * + * Level 0: Spurious: ignored + * Level 1: VIA1 (clock, ADB) + * Level 2: VIA2 (NuBus, SCSI) + * Level 3: + * Level 4: Serial (SCC) + * Level 5: + * Level 6: + * Level 7: Non-maskable: parity errors, RESET button + * + * Note that despite references to A/UX mode in Linux and NetBSD, at least + * A/UX 3.0.1 still uses Classic mode. + */ + static void GLUE_set_irq(void *opaque, int irq, int level) { GLUEState *s = opaque; int i; + if (s->auxmode) { + /* Classic mode */ + switch (irq) { + case GLUE_IRQ_IN_VIA1: + irq = 0; + break; + + case GLUE_IRQ_IN_VIA2: + irq = 1; + break; + + case GLUE_IRQ_IN_SONIC: + /* Route to VIA2 instead */ + qemu_set_irq(s->irqs[GLUE_IRQ_NUBUS_9], level); + return; + + case GLUE_IRQ_IN_ESCC: + irq = 3; + break; + + case GLUE_IRQ_IN_NMI: + irq = 6; + break; + + default: + g_assert_not_reached(); + } + } else { + /* A/UX mode */ + switch (irq) { + case GLUE_IRQ_IN_VIA1: + irq = 5; + break; + + case GLUE_IRQ_IN_VIA2: + irq = 1; + break; + + case GLUE_IRQ_IN_SONIC: + irq = 2; + break; + + case GLUE_IRQ_IN_ESCC: + irq = 3; + break; + + case GLUE_IRQ_IN_NMI: + irq = 6; + break; + + default: + g_assert_not_reached(); + } + } + if (level) { s->ipr |= 1 << irq; } else { @@ -118,11 +220,37 @@ static void GLUE_set_irq(void *opaque, int irq, int level) m68k_set_irq_level(s->cpu, 0, 0); } +static void glue_auxmode_set_irq(void *opaque, int irq, int level) +{ + GLUEState *s = GLUE(opaque); + + s->auxmode = level; +} + +static void glue_nmi(NMIState *n, int cpu_index, Error **errp) +{ + GLUEState *s = GLUE(n); + + /* Hold NMI active for 100ms */ + GLUE_set_irq(s, GLUE_IRQ_IN_NMI, 1); + timer_mod(s->nmi_release, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 100); +} + +static void glue_nmi_release(void *opaque) +{ + GLUEState *s = GLUE(opaque); + + GLUE_set_irq(s, GLUE_IRQ_IN_NMI, 0); +} + static void glue_reset(DeviceState *dev) { GLUEState *s = GLUE(dev); s->ipr = 0; + s->auxmode = 0; + + timer_del(s->nmi_release); } static const VMStateDescription vmstate_glue = { @@ -131,6 +259,8 @@ static const VMStateDescription vmstate_glue = { .minimum_version_id = 0, .fields = (VMStateField[]) { VMSTATE_UINT8(ipr, GLUEState), + VMSTATE_UINT8(auxmode, GLUEState), + VMSTATE_TIMER_PTR(nmi_release, GLUEState), VMSTATE_END_OF_LIST(), }, }; @@ -146,20 +276,36 @@ static Property glue_properties[] = { DEFINE_PROP_END_OF_LIST(), }; +static void glue_finalize(Object *obj) +{ + GLUEState *s = GLUE(obj); + + timer_free(s->nmi_release); +} + static void glue_init(Object *obj) { DeviceState *dev = DEVICE(obj); + GLUEState *s = GLUE(dev); qdev_init_gpio_in(dev, GLUE_set_irq, 8); + qdev_init_gpio_in_named(dev, glue_auxmode_set_irq, 
"auxmode", 1); + + qdev_init_gpio_out(dev, s->irqs, 1); + + /* NMI release timer */ + s->nmi_release = timer_new_ms(QEMU_CLOCK_VIRTUAL, glue_nmi_release, s); } static void glue_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); + NMIClass *nc = NMI_CLASS(klass); dc->vmsd = &vmstate_glue; dc->reset = glue_reset; device_class_set_props(dc, glue_properties); + nc->nmi_monitor_handler = glue_nmi; } static const TypeInfo glue_info = { @@ -167,7 +313,12 @@ static const TypeInfo glue_info = { .parent = TYPE_SYS_BUS_DEVICE, .instance_size = sizeof(GLUEState), .instance_init = glue_init, + .instance_finalize = glue_finalize, .class_init = glue_class_init, + .interfaces = (InterfaceInfo[]) { + { TYPE_NMI }, + { } + }, }; static void main_cpu_reset(void *opaque) @@ -217,6 +368,7 @@ static void q800_init(MachineState *machine) uint8_t *prom; const int io_slice_nb = (IO_SIZE / IO_SLICE) - 1; int i, checksum; + MacFbMode *macfb_mode; ram_addr_t ram_size = machine->ram_size; const char *kernel_filename = machine->kernel_filename; const char *initrd_filename = machine->initrd_filename; @@ -279,7 +431,10 @@ static void q800_init(MachineState *machine) sysbus = SYS_BUS_DEVICE(via1_dev); sysbus_realize_and_unref(sysbus, &error_fatal); sysbus_mmio_map(sysbus, 1, VIA_BASE); - sysbus_connect_irq(sysbus, 0, qdev_get_gpio_in(glue, 0)); + sysbus_connect_irq(sysbus, 0, qdev_get_gpio_in(glue, GLUE_IRQ_IN_VIA1)); + /* A/UX mode */ + qdev_connect_gpio_out(via1_dev, 0, + qdev_get_gpio_in_named(glue, "auxmode", 0)); adb_bus = qdev_get_child_bus(via1_dev, "adb.0"); dev = qdev_new(TYPE_ADB_KEYBOARD); @@ -292,7 +447,7 @@ static void q800_init(MachineState *machine) sysbus = SYS_BUS_DEVICE(via2_dev); sysbus_realize_and_unref(sysbus, &error_fatal); sysbus_mmio_map(sysbus, 1, VIA_BASE + VIA_SIZE); - sysbus_connect_irq(sysbus, 0, qdev_get_gpio_in(glue, 1)); + sysbus_connect_irq(sysbus, 0, qdev_get_gpio_in(glue, GLUE_IRQ_IN_VIA2)); /* MACSONIC */ @@ -325,7 +480,7 @@ static void q800_init(MachineState *machine) sysbus = SYS_BUS_DEVICE(dev); sysbus_realize_and_unref(sysbus, &error_fatal); sysbus_mmio_map(sysbus, 0, SONIC_BASE); - sysbus_connect_irq(sysbus, 0, qdev_get_gpio_in(glue, 2)); + sysbus_connect_irq(sysbus, 0, qdev_get_gpio_in(glue, GLUE_IRQ_IN_SONIC)); memory_region_init_rom(dp8393x_prom, NULL, "dp8393x-q800.prom", SONIC_PROM_SIZE, &error_fatal); @@ -361,7 +516,8 @@ static void q800_init(MachineState *machine) qdev_realize_and_unref(escc_orgate, NULL, &error_fatal); sysbus_connect_irq(sysbus, 0, qdev_get_gpio_in(escc_orgate, 0)); sysbus_connect_irq(sysbus, 1, qdev_get_gpio_in(escc_orgate, 1)); - qdev_connect_gpio_out(DEVICE(escc_orgate), 0, qdev_get_gpio_in(glue, 3)); + qdev_connect_gpio_out(DEVICE(escc_orgate), 0, + qdev_get_gpio_in(glue, GLUE_IRQ_IN_ESCC)); sysbus_mmio_map(sysbus, 0, SCC_BASE); /* SCSI */ @@ -395,20 +551,48 @@ static void q800_init(MachineState *machine) /* NuBus */ dev = qdev_new(TYPE_MAC_NUBUS_BRIDGE); + qdev_prop_set_uint32(dev, "slot-available-mask", + Q800_NUBUS_SLOTS_AVAILABLE); sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); - sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, NUBUS_SUPER_SLOT_BASE); - sysbus_mmio_map(SYS_BUS_DEVICE(dev), 1, NUBUS_SLOT_BASE); + sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, + MAC_NUBUS_FIRST_SLOT * NUBUS_SUPER_SLOT_SIZE); + sysbus_mmio_map(SYS_BUS_DEVICE(dev), 1, NUBUS_SLOT_BASE + + MAC_NUBUS_FIRST_SLOT * NUBUS_SLOT_SIZE); + qdev_connect_gpio_out(dev, 9, + qdev_get_gpio_in_named(via2_dev, "nubus-irq", + VIA2_NUBUS_IRQ_INTVIDEO)); + 
for (i = 1; i < VIA2_NUBUS_IRQ_NB; i++) { + qdev_connect_gpio_out(dev, 9 + i, + qdev_get_gpio_in_named(via2_dev, "nubus-irq", + VIA2_NUBUS_IRQ_9 + i)); + } - nubus = MAC_NUBUS_BRIDGE(dev)->bus; + /* + * Since the framebuffer in slot 0x9 uses a separate IRQ, wire the unused + * IRQ via GLUE for use by SONIC Ethernet in classic mode + */ + qdev_connect_gpio_out(glue, GLUE_IRQ_NUBUS_9, + qdev_get_gpio_in_named(via2_dev, "nubus-irq", + VIA2_NUBUS_IRQ_9)); + + nubus = &NUBUS_BRIDGE(dev)->bus; /* framebuffer in nubus slot #9 */ dev = qdev_new(TYPE_NUBUS_MACFB); + qdev_prop_set_uint32(dev, "slot", 9); qdev_prop_set_uint32(dev, "width", graphic_width); qdev_prop_set_uint32(dev, "height", graphic_height); qdev_prop_set_uint8(dev, "depth", graphic_depth); + if (graphic_width == 1152 && graphic_height == 870) { + qdev_prop_set_uint8(dev, "display", MACFB_DISPLAY_APPLE_21_COLOR); + } else { + qdev_prop_set_uint8(dev, "display", MACFB_DISPLAY_VGA); + } qdev_realize_and_unref(dev, BUS(nubus), &error_fatal); + macfb_mode = (NUBUS_MACFB(dev)->macfb).mode; + cs = CPU(cpu); if (linux_boot) { uint64_t high; @@ -431,12 +615,12 @@ static void q800_init(MachineState *machine) BOOTINFO1(cs->as, parameters_base, BI_MAC_MEMSIZE, ram_size >> 20); /* in MB */ BOOTINFO2(cs->as, parameters_base, BI_MEMCHUNK, 0, ram_size); - BOOTINFO1(cs->as, parameters_base, BI_MAC_VADDR, VIDEO_BASE); + BOOTINFO1(cs->as, parameters_base, BI_MAC_VADDR, + VIDEO_BASE + macfb_mode->offset); BOOTINFO1(cs->as, parameters_base, BI_MAC_VDEPTH, graphic_depth); BOOTINFO1(cs->as, parameters_base, BI_MAC_VDIM, (graphic_height << 16) | graphic_width); - BOOTINFO1(cs->as, parameters_base, BI_MAC_VROW, - (graphic_width * graphic_depth + 7) / 8); + BOOTINFO1(cs->as, parameters_base, BI_MAC_VROW, macfb_mode->stride); BOOTINFO1(cs->as, parameters_base, BI_MAC_SCCBASE, SCC_BASE); rom = g_malloc(sizeof(*rom)); diff --git a/hw/m68k/virt.c b/hw/m68k/virt.c index 4e8bce5aa6..0efa4a45c7 100644 --- a/hw/m68k/virt.c +++ b/hw/m68k/virt.c @@ -1,5 +1,5 @@ /* - * SPDX-License-Identifer: GPL-2.0-or-later + * SPDX-License-Identifier: GPL-2.0-or-later * * QEMU Vitual M68K Machine * @@ -304,7 +304,21 @@ type_init(virt_machine_register_types) } \ type_init(machvirt_machine_##major##_##minor##_init); -static void virt_machine_6_0_options(MachineClass *mc) +static void virt_machine_6_2_options(MachineClass *mc) { } -DEFINE_VIRT_MACHINE(6, 0, true) +DEFINE_VIRT_MACHINE(6, 2, true) + +static void virt_machine_6_1_options(MachineClass *mc) +{ + virt_machine_6_2_options(mc); + compat_props_add(mc->compat_props, hw_compat_6_1, hw_compat_6_1_len); +} +DEFINE_VIRT_MACHINE(6, 1, false) + +static void virt_machine_6_0_options(MachineClass *mc) +{ + virt_machine_6_1_options(mc); + compat_props_add(mc->compat_props, hw_compat_6_0, hw_compat_6_0_len); +} +DEFINE_VIRT_MACHINE(6, 0, false) diff --git a/hw/mem/Kconfig b/hw/mem/Kconfig index 8b19fdc49f..03dbb3c7df 100644 --- a/hw/mem/Kconfig +++ b/hw/mem/Kconfig @@ -8,3 +8,6 @@ config MEM_DEVICE config NVDIMM bool select MEM_DEVICE + +config SPARSE_MEM + bool diff --git a/hw/mem/meson.build b/hw/mem/meson.build index 3c8fdef9f9..82f86d117e 100644 --- a/hw/mem/meson.build +++ b/hw/mem/meson.build @@ -6,4 +6,4 @@ mem_ss.add(when: 'CONFIG_NVDIMM', if_true: files('nvdimm.c')) softmmu_ss.add_all(when: 'CONFIG_MEM_DEVICE', if_true: mem_ss) -softmmu_ss.add(when: 'CONFIG_FUZZ', if_true: files('sparse-mem.c')) +softmmu_ss.add(when: 'CONFIG_SPARSE_MEM', if_true: files('sparse-mem.c')) diff --git a/hw/mips/boston.c b/hw/mips/boston.c index 
20b06865b2..0e3cca5511 100644 --- a/hw/mips/boston.c +++ b/hw/mips/boston.c @@ -20,6 +20,7 @@ #include "qemu/osdep.h" #include "qemu/units.h" +#include "elf.h" #include "hw/boards.h" #include "hw/char/serial.h" #include "hw/ide/pci.h" @@ -48,6 +49,15 @@ typedef struct BostonState BostonState; DECLARE_INSTANCE_CHECKER(BostonState, BOSTON, TYPE_BOSTON) +#define FDT_IRQ_TYPE_NONE 0 +#define FDT_IRQ_TYPE_LEVEL_HIGH 4 +#define FDT_GIC_SHARED 0 +#define FDT_GIC_LOCAL 1 +#define FDT_BOSTON_CLK_SYS 1 +#define FDT_BOSTON_CLK_CPU 2 +#define FDT_PCI_IRQ_MAP_PINS 4 +#define FDT_PCI_IRQ_MAP_DESCS 6 + struct BostonState { SysBusDevice parent_obj; @@ -64,6 +74,44 @@ struct BostonState { hwaddr fdt_base; }; +enum { + BOSTON_LOWDDR, + BOSTON_PCIE0, + BOSTON_PCIE1, + BOSTON_PCIE2, + BOSTON_PCIE2_MMIO, + BOSTON_CM, + BOSTON_GIC, + BOSTON_CDMM, + BOSTON_CPC, + BOSTON_PLATREG, + BOSTON_UART, + BOSTON_LCD, + BOSTON_FLASH, + BOSTON_PCIE1_MMIO, + BOSTON_PCIE0_MMIO, + BOSTON_HIGHDDR, +}; + +static const MemMapEntry boston_memmap[] = { + [BOSTON_LOWDDR] = { 0x0, 0x10000000 }, + [BOSTON_PCIE0] = { 0x10000000, 0x2000000 }, + [BOSTON_PCIE1] = { 0x12000000, 0x2000000 }, + [BOSTON_PCIE2] = { 0x14000000, 0x2000000 }, + [BOSTON_PCIE2_MMIO] = { 0x16000000, 0x100000 }, + [BOSTON_CM] = { 0x16100000, 0x20000 }, + [BOSTON_GIC] = { 0x16120000, 0x20000 }, + [BOSTON_CDMM] = { 0x16140000, 0x8000 }, + [BOSTON_CPC] = { 0x16200000, 0x8000 }, + [BOSTON_PLATREG] = { 0x17ffd000, 0x1000 }, + [BOSTON_UART] = { 0x17ffe000, 0x20 }, + [BOSTON_LCD] = { 0x17fff000, 0x8 }, + [BOSTON_FLASH] = { 0x18000000, 0x8000000 }, + [BOSTON_PCIE1_MMIO] = { 0x20000000, 0x20000000 }, + [BOSTON_PCIE0_MMIO] = { 0x40000000, 0x40000000 }, + [BOSTON_HIGHDDR] = { 0x80000000, 0x0 }, +}; + enum boston_plat_reg { PLAT_FPGA_BUILD = 0x00, PLAT_CORE_CL = 0x04, @@ -275,24 +323,24 @@ type_init(boston_register_types) static void gen_firmware(uint32_t *p, hwaddr kernel_entry, hwaddr fdt_addr) { - const uint32_t cm_base = 0x16100000; - const uint32_t gic_base = 0x16120000; - const uint32_t cpc_base = 0x16200000; + uint64_t regaddr; /* Move CM GCRs */ - bl_gen_write_ulong(&p, - cpu_mips_phys_to_kseg1(NULL, GCR_BASE_ADDR + GCR_BASE_OFS), - cm_base); + regaddr = cpu_mips_phys_to_kseg1(NULL, GCR_BASE_ADDR + GCR_BASE_OFS), + bl_gen_write_ulong(&p, regaddr, + boston_memmap[BOSTON_CM].base); /* Move & enable GIC GCRs */ - bl_gen_write_ulong(&p, - cpu_mips_phys_to_kseg1(NULL, cm_base + GCR_GIC_BASE_OFS), - gic_base | GCR_GIC_BASE_GICEN_MSK); + regaddr = cpu_mips_phys_to_kseg1(NULL, boston_memmap[BOSTON_CM].base + + GCR_GIC_BASE_OFS), + bl_gen_write_ulong(&p, regaddr, + boston_memmap[BOSTON_GIC].base | GCR_GIC_BASE_GICEN_MSK); /* Move & enable CPC GCRs */ - bl_gen_write_ulong(&p, - cpu_mips_phys_to_kseg1(NULL, cm_base + GCR_CPC_BASE_OFS), - cpc_base | GCR_CPC_BASE_CPCEN_MSK); + regaddr = cpu_mips_phys_to_kseg1(NULL, boston_memmap[BOSTON_CM].base + + GCR_CPC_BASE_OFS), + bl_gen_write_ulong(&p, regaddr, + boston_memmap[BOSTON_CPC].base | GCR_CPC_BASE_CPCEN_MSK); /* * Setup argument registers to follow the UHI boot protocol: @@ -333,8 +381,9 @@ static const void *boston_fdt_filter(void *opaque, const void *fdt_orig, ram_low_sz = MIN(256 * MiB, machine->ram_size); ram_high_sz = machine->ram_size - ram_low_sz; qemu_fdt_setprop_sized_cells(fdt, "/memory@0", "reg", - 1, 0x00000000, 1, ram_low_sz, - 1, 0x90000000, 1, ram_high_sz); + 1, boston_memmap[BOSTON_LOWDDR].base, 1, ram_low_sz, + 1, boston_memmap[BOSTON_HIGHDDR].base + ram_low_sz, + 1, ram_high_sz); fdt = g_realloc(fdt, 
fdt_totalsize(fdt)); qemu_fdt_dumpdtb(fdt, fdt_sz); @@ -397,6 +446,222 @@ xilinx_pcie_init(MemoryRegion *sys_mem, uint32_t bus_nr, return XILINX_PCIE_HOST(dev); } + +static void fdt_create_pcie(void *fdt, int gic_ph, int irq, hwaddr reg_base, + hwaddr reg_size, hwaddr mmio_base, hwaddr mmio_size) +{ + int i; + char *name, *intc_name; + uint32_t intc_ph; + uint32_t interrupt_map[FDT_PCI_IRQ_MAP_PINS][FDT_PCI_IRQ_MAP_DESCS]; + + intc_ph = qemu_fdt_alloc_phandle(fdt); + name = g_strdup_printf("/soc/pci@%" HWADDR_PRIx, reg_base); + qemu_fdt_add_subnode(fdt, name); + qemu_fdt_setprop_string(fdt, name, "compatible", + "xlnx,axi-pcie-host-1.00.a"); + qemu_fdt_setprop_string(fdt, name, "device_type", "pci"); + qemu_fdt_setprop_cells(fdt, name, "reg", reg_base, reg_size); + + qemu_fdt_setprop_cell(fdt, name, "#address-cells", 3); + qemu_fdt_setprop_cell(fdt, name, "#size-cells", 2); + qemu_fdt_setprop_cell(fdt, name, "#interrupt-cells", 1); + + qemu_fdt_setprop_cell(fdt, name, "interrupt-parent", gic_ph); + qemu_fdt_setprop_cells(fdt, name, "interrupts", FDT_GIC_SHARED, irq, + FDT_IRQ_TYPE_LEVEL_HIGH); + + qemu_fdt_setprop_cells(fdt, name, "ranges", 0x02000000, 0, mmio_base, + mmio_base, 0, mmio_size); + qemu_fdt_setprop_cells(fdt, name, "bus-range", 0x00, 0xff); + + + + intc_name = g_strdup_printf("%s/interrupt-controller", name); + qemu_fdt_add_subnode(fdt, intc_name); + qemu_fdt_setprop(fdt, intc_name, "interrupt-controller", NULL, 0); + qemu_fdt_setprop_cell(fdt, intc_name, "#address-cells", 0); + qemu_fdt_setprop_cell(fdt, intc_name, "#interrupt-cells", 1); + qemu_fdt_setprop_cell(fdt, intc_name, "phandle", intc_ph); + + qemu_fdt_setprop_cells(fdt, name, "interrupt-map-mask", 0, 0, 0, 7); + for (i = 0; i < FDT_PCI_IRQ_MAP_PINS; i++) { + uint32_t *irqmap = interrupt_map[i]; + + irqmap[0] = cpu_to_be32(0); + irqmap[1] = cpu_to_be32(0); + irqmap[2] = cpu_to_be32(0); + irqmap[3] = cpu_to_be32(i + 1); + irqmap[4] = cpu_to_be32(intc_ph); + irqmap[5] = cpu_to_be32(i + 1); + } + qemu_fdt_setprop(fdt, name, "interrupt-map", + &interrupt_map, sizeof(interrupt_map)); + + g_free(intc_name); + g_free(name); +} + +static const void *create_fdt(BostonState *s, + const MemMapEntry *memmap, int *dt_size) +{ + void *fdt; + int cpu; + MachineState *mc = s->mach; + uint32_t platreg_ph, gic_ph, clk_ph; + char *name, *gic_name, *platreg_name, *stdout_name; + static const char * const syscon_compat[2] = { + "img,boston-platform-regs", "syscon" + }; + + fdt = create_device_tree(dt_size); + if (!fdt) { + error_report("create_device_tree() failed"); + exit(1); + } + + platreg_ph = qemu_fdt_alloc_phandle(fdt); + gic_ph = qemu_fdt_alloc_phandle(fdt); + clk_ph = qemu_fdt_alloc_phandle(fdt); + + qemu_fdt_setprop_string(fdt, "/", "model", "img,boston"); + qemu_fdt_setprop_string(fdt, "/", "compatible", "img,boston"); + qemu_fdt_setprop_cell(fdt, "/", "#size-cells", 0x1); + qemu_fdt_setprop_cell(fdt, "/", "#address-cells", 0x1); + + + qemu_fdt_add_subnode(fdt, "/cpus"); + qemu_fdt_setprop_cell(fdt, "/cpus", "#size-cells", 0x0); + qemu_fdt_setprop_cell(fdt, "/cpus", "#address-cells", 0x1); + + for (cpu = 0; cpu < mc->smp.cpus; cpu++) { + name = g_strdup_printf("/cpus/cpu@%d", cpu); + qemu_fdt_add_subnode(fdt, name); + qemu_fdt_setprop_string(fdt, name, "compatible", "img,mips"); + qemu_fdt_setprop_string(fdt, name, "status", "okay"); + qemu_fdt_setprop_cell(fdt, name, "reg", cpu); + qemu_fdt_setprop_string(fdt, name, "device_type", "cpu"); + qemu_fdt_setprop_cells(fdt, name, "clocks", clk_ph, FDT_BOSTON_CLK_CPU); + 
g_free(name); + } + + qemu_fdt_add_subnode(fdt, "/soc"); + qemu_fdt_setprop(fdt, "/soc", "ranges", NULL, 0); + qemu_fdt_setprop_string(fdt, "/soc", "compatible", "simple-bus"); + qemu_fdt_setprop_cell(fdt, "/soc", "#size-cells", 0x1); + qemu_fdt_setprop_cell(fdt, "/soc", "#address-cells", 0x1); + + fdt_create_pcie(fdt, gic_ph, 2, + memmap[BOSTON_PCIE0].base, memmap[BOSTON_PCIE0].size, + memmap[BOSTON_PCIE0_MMIO].base, memmap[BOSTON_PCIE0_MMIO].size); + + fdt_create_pcie(fdt, gic_ph, 1, + memmap[BOSTON_PCIE1].base, memmap[BOSTON_PCIE1].size, + memmap[BOSTON_PCIE1_MMIO].base, memmap[BOSTON_PCIE1_MMIO].size); + + fdt_create_pcie(fdt, gic_ph, 0, + memmap[BOSTON_PCIE2].base, memmap[BOSTON_PCIE2].size, + memmap[BOSTON_PCIE2_MMIO].base, memmap[BOSTON_PCIE2_MMIO].size); + + /* GIC with it's timer node */ + gic_name = g_strdup_printf("/soc/interrupt-controller@%" HWADDR_PRIx, + memmap[BOSTON_GIC].base); + qemu_fdt_add_subnode(fdt, gic_name); + qemu_fdt_setprop_string(fdt, gic_name, "compatible", "mti,gic"); + qemu_fdt_setprop_cells(fdt, gic_name, "reg", memmap[BOSTON_GIC].base, + memmap[BOSTON_GIC].size); + qemu_fdt_setprop(fdt, gic_name, "interrupt-controller", NULL, 0); + qemu_fdt_setprop_cell(fdt, gic_name, "#interrupt-cells", 3); + qemu_fdt_setprop_cell(fdt, gic_name, "phandle", gic_ph); + + name = g_strdup_printf("%s/timer", gic_name); + qemu_fdt_add_subnode(fdt, name); + qemu_fdt_setprop_string(fdt, name, "compatible", "mti,gic-timer"); + qemu_fdt_setprop_cells(fdt, name, "interrupts", FDT_GIC_LOCAL, 1, + FDT_IRQ_TYPE_NONE); + qemu_fdt_setprop_cells(fdt, name, "clocks", clk_ph, FDT_BOSTON_CLK_CPU); + g_free(name); + g_free(gic_name); + + /* CDMM node */ + name = g_strdup_printf("/soc/cdmm@%" HWADDR_PRIx, memmap[BOSTON_CDMM].base); + qemu_fdt_add_subnode(fdt, name); + qemu_fdt_setprop_string(fdt, name, "compatible", "mti,mips-cdmm"); + qemu_fdt_setprop_cells(fdt, name, "reg", memmap[BOSTON_CDMM].base, + memmap[BOSTON_CDMM].size); + g_free(name); + + /* CPC node */ + name = g_strdup_printf("/soc/cpc@%" HWADDR_PRIx, memmap[BOSTON_CPC].base); + qemu_fdt_add_subnode(fdt, name); + qemu_fdt_setprop_string(fdt, name, "compatible", "mti,mips-cpc"); + qemu_fdt_setprop_cells(fdt, name, "reg", memmap[BOSTON_CPC].base, + memmap[BOSTON_CPC].size); + g_free(name); + + /* platreg and it's clk node */ + platreg_name = g_strdup_printf("/soc/system-controller@%" HWADDR_PRIx, + memmap[BOSTON_PLATREG].base); + qemu_fdt_add_subnode(fdt, platreg_name); + qemu_fdt_setprop_string_array(fdt, platreg_name, "compatible", + (char **)&syscon_compat, + ARRAY_SIZE(syscon_compat)); + qemu_fdt_setprop_cells(fdt, platreg_name, "reg", + memmap[BOSTON_PLATREG].base, + memmap[BOSTON_PLATREG].size); + qemu_fdt_setprop_cell(fdt, platreg_name, "phandle", platreg_ph); + + name = g_strdup_printf("%s/clock", platreg_name); + qemu_fdt_add_subnode(fdt, name); + qemu_fdt_setprop_string(fdt, name, "compatible", "img,boston-clock"); + qemu_fdt_setprop_cell(fdt, name, "#clock-cells", 1); + qemu_fdt_setprop_cell(fdt, name, "phandle", clk_ph); + g_free(name); + g_free(platreg_name); + + /* reboot node */ + name = g_strdup_printf("/soc/reboot"); + qemu_fdt_add_subnode(fdt, name); + qemu_fdt_setprop_string(fdt, name, "compatible", "syscon-reboot"); + qemu_fdt_setprop_cell(fdt, name, "regmap", platreg_ph); + qemu_fdt_setprop_cell(fdt, name, "offset", 0x10); + qemu_fdt_setprop_cell(fdt, name, "mask", 0x10); + g_free(name); + + /* uart node */ + name = g_strdup_printf("/soc/uart@%" HWADDR_PRIx, memmap[BOSTON_UART].base); + qemu_fdt_add_subnode(fdt, 
name); + qemu_fdt_setprop_string(fdt, name, "compatible", "ns16550a"); + qemu_fdt_setprop_cells(fdt, name, "reg", memmap[BOSTON_UART].base, + memmap[BOSTON_UART].size); + qemu_fdt_setprop_cell(fdt, name, "reg-shift", 0x2); + qemu_fdt_setprop_cell(fdt, name, "interrupt-parent", gic_ph); + qemu_fdt_setprop_cells(fdt, name, "interrupts", FDT_GIC_SHARED, 3, + FDT_IRQ_TYPE_LEVEL_HIGH); + qemu_fdt_setprop_cells(fdt, name, "clocks", clk_ph, FDT_BOSTON_CLK_SYS); + + qemu_fdt_add_subnode(fdt, "/chosen"); + stdout_name = g_strdup_printf("%s:115200", name); + qemu_fdt_setprop_string(fdt, "/chosen", "stdout-path", stdout_name); + g_free(stdout_name); + g_free(name); + + /* lcd node */ + name = g_strdup_printf("/soc/lcd@%" HWADDR_PRIx, memmap[BOSTON_LCD].base); + qemu_fdt_add_subnode(fdt, name); + qemu_fdt_setprop_string(fdt, name, "compatible", "img,boston-lcd"); + qemu_fdt_setprop_cells(fdt, name, "reg", memmap[BOSTON_LCD].base, + memmap[BOSTON_LCD].size); + g_free(name); + + name = g_strdup_printf("/memory@0"); + qemu_fdt_add_subnode(fdt, name); + qemu_fdt_setprop_string(fdt, name, "device_type", "memory"); + g_free(name); + + return fdt; +} + static void boston_mach_init(MachineState *machine) { DeviceState *dev; @@ -438,11 +703,15 @@ static void boston_mach_init(MachineState *machine) sysbus_mmio_map_overlap(SYS_BUS_DEVICE(&s->cps), 0, 0, 1); flash = g_new(MemoryRegion, 1); - memory_region_init_rom(flash, NULL, "boston.flash", 128 * MiB, - &error_fatal); - memory_region_add_subregion_overlap(sys_mem, 0x18000000, flash, 0); + memory_region_init_rom(flash, NULL, "boston.flash", + boston_memmap[BOSTON_FLASH].size, &error_fatal); + memory_region_add_subregion_overlap(sys_mem, + boston_memmap[BOSTON_FLASH].base, + flash, 0); - memory_region_add_subregion_overlap(sys_mem, 0x80000000, machine->ram, 0); + memory_region_add_subregion_overlap(sys_mem, + boston_memmap[BOSTON_HIGHDDR].base, + machine->ram, 0); ddr_low_alias = g_new(MemoryRegion, 1); memory_region_init_alias(ddr_low_alias, NULL, "boston_low.ddr", @@ -451,32 +720,41 @@ static void boston_mach_init(MachineState *machine) memory_region_add_subregion_overlap(sys_mem, 0, ddr_low_alias, 0); xilinx_pcie_init(sys_mem, 0, - 0x10000000, 32 * MiB, - 0x40000000, 1 * GiB, + boston_memmap[BOSTON_PCIE0].base, + boston_memmap[BOSTON_PCIE0].size, + boston_memmap[BOSTON_PCIE0_MMIO].base, + boston_memmap[BOSTON_PCIE0_MMIO].size, get_cps_irq(&s->cps, 2), false); xilinx_pcie_init(sys_mem, 1, - 0x12000000, 32 * MiB, - 0x20000000, 512 * MiB, + boston_memmap[BOSTON_PCIE1].base, + boston_memmap[BOSTON_PCIE1].size, + boston_memmap[BOSTON_PCIE1_MMIO].base, + boston_memmap[BOSTON_PCIE1_MMIO].size, get_cps_irq(&s->cps, 1), false); pcie2 = xilinx_pcie_init(sys_mem, 2, - 0x14000000, 32 * MiB, - 0x16000000, 1 * MiB, + boston_memmap[BOSTON_PCIE2].base, + boston_memmap[BOSTON_PCIE2].size, + boston_memmap[BOSTON_PCIE2_MMIO].base, + boston_memmap[BOSTON_PCIE2_MMIO].size, get_cps_irq(&s->cps, 0), true); platreg = g_new(MemoryRegion, 1); memory_region_init_io(platreg, NULL, &boston_platreg_ops, s, - "boston-platregs", 0x1000); - memory_region_add_subregion_overlap(sys_mem, 0x17ffd000, platreg, 0); + "boston-platregs", + boston_memmap[BOSTON_PLATREG].size); + memory_region_add_subregion_overlap(sys_mem, + boston_memmap[BOSTON_PLATREG].base, platreg, 0); - s->uart = serial_mm_init(sys_mem, 0x17ffe000, 2, + s->uart = serial_mm_init(sys_mem, boston_memmap[BOSTON_UART].base, 2, get_cps_irq(&s->cps, 3), 10000000, serial_hd(0), DEVICE_NATIVE_ENDIAN); lcd = g_new(MemoryRegion, 1); 
memory_region_init_io(lcd, NULL, &boston_lcd_ops, s, "boston-lcd", 0x8); - memory_region_add_subregion_overlap(sys_mem, 0x17fff000, lcd, 0); + memory_region_add_subregion_overlap(sys_mem, + boston_memmap[BOSTON_LCD].base, lcd, 0); chr = qemu_chr_new("lcd", "vc:320x240", NULL); qemu_chr_fe_init(&s->lcd_display, chr, NULL); @@ -499,10 +777,39 @@ static void boston_mach_init(MachineState *machine) exit(1); } } else if (machine->kernel_filename) { - fit_err = load_fit(&boston_fit_loader, machine->kernel_filename, s); - if (fit_err) { - error_report("unable to load FIT image"); - exit(1); + uint64_t kernel_entry, kernel_high, kernel_size; + + kernel_size = load_elf(machine->kernel_filename, NULL, + cpu_mips_kseg0_to_phys, NULL, + &kernel_entry, NULL, &kernel_high, + NULL, 0, EM_MIPS, 1, 0); + + if (kernel_size) { + int dt_size; + g_autofree const void *dtb_file_data, *dtb_load_data; + hwaddr dtb_paddr = QEMU_ALIGN_UP(kernel_high, 64 * KiB); + hwaddr dtb_vaddr = cpu_mips_phys_to_kseg0(NULL, dtb_paddr); + + s->kernel_entry = kernel_entry; + if (machine->dtb) { + dtb_file_data = load_device_tree(machine->dtb, &dt_size); + } else { + dtb_file_data = create_fdt(s, boston_memmap, &dt_size); + } + + dtb_load_data = boston_fdt_filter(s, dtb_file_data, + NULL, &dtb_vaddr); + + /* Calculate real fdt size after filter */ + dt_size = fdt_totalsize(dtb_load_data); + rom_add_blob_fixed("dtb", dtb_load_data, dt_size, dtb_paddr); + } else { + /* Try to load file as FIT */ + fit_err = load_fit(&boston_fit_loader, machine->kernel_filename, s); + if (fit_err) { + error_report("unable to load kernel image"); + exit(1); + } } gen_firmware(memory_region_get_ram_ptr(flash) + 0x7c00000, diff --git a/hw/misc/auxbus.c b/hw/misc/auxbus.c index 434ff8d910..8a8012f5f0 100644 --- a/hw/misc/auxbus.c +++ b/hw/misc/auxbus.c @@ -65,7 +65,7 @@ AUXBus *aux_bus_init(DeviceState *parent, const char *name) AUXBus *bus; Object *auxtoi2c; - bus = AUX_BUS(qbus_create(TYPE_AUX_BUS, parent, name)); + bus = AUX_BUS(qbus_new(TYPE_AUX_BUS, parent, name)); auxtoi2c = object_new_with_props(TYPE_AUXTOI2C, OBJECT(bus), "i2c", &error_abort, NULL); diff --git a/hw/misc/mac_via.c b/hw/misc/mac_via.c index d1abcd97b5..b378e6b305 100644 --- a/hw/misc/mac_via.c +++ b/hw/misc/mac_via.c @@ -130,6 +130,10 @@ * On SE/30, vertical sync interrupt enable. * 0=enabled. This vSync interrupt shows up * as a slot $E interrupt. + * On Quadra 800 this bit toggles A/UX mode which + * configures the glue logic to deliver some IRQs + * at different levels compared to a classic + * Mac. */ #define VIA1B_vADBS2 0x20 /* ADB state input bit 1 (unused on IIfx) */ #define VIA1B_vADBS1 0x10 /* ADB state input bit 0 (unused on IIfx) */ @@ -876,6 +880,21 @@ static void via1_adb_update(MOS6522Q800VIA1State *v1s) } } +static void via1_auxmode_update(MOS6522Q800VIA1State *v1s) +{ + MOS6522State *s = MOS6522(v1s); + int oldirq, irq; + + oldirq = (v1s->last_b & VIA1B_vMystery) ? 1 : 0; + irq = (s->b & VIA1B_vMystery) ? 
1 : 0; + + /* Check to see if the A/UX mode bit has changed */ + if (irq != oldirq) { + trace_via1_auxmode(irq); + qemu_set_irq(v1s->auxmode_irq, irq); + } +} + static uint64_t mos6522_q800_via1_read(void *opaque, hwaddr addr, unsigned size) { MOS6522Q800VIA1State *s = MOS6522_Q800_VIA1(opaque); @@ -898,6 +917,7 @@ static void mos6522_q800_via1_write(void *opaque, hwaddr addr, uint64_t val, case VIA_REG_B: via1_rtc_update(v1s); via1_adb_update(v1s); + via1_auxmode_update(v1s); v1s->last_b = ms->b; break; @@ -1038,10 +1058,13 @@ static void mos6522_q800_via1_init(Object *obj) sysbus_init_mmio(sbd, &v1s->via_mem); /* ADB */ - qbus_create_inplace((BusState *)&v1s->adb_bus, sizeof(v1s->adb_bus), - TYPE_ADB_BUS, DEVICE(v1s), "adb.0"); + qbus_init((BusState *)&v1s->adb_bus, sizeof(v1s->adb_bus), + TYPE_ADB_BUS, DEVICE(v1s), "adb.0"); qdev_init_gpio_in(DEVICE(obj), via1_irq_request, VIA1_IRQ_NB); + + /* A/UX mode */ + qdev_init_gpio_out(DEVICE(obj), &v1s->auxmode_irq, 1); } static const VMStateDescription vmstate_q800_via1 = { diff --git a/hw/misc/macio/cuda.c b/hw/misc/macio/cuda.c index edbd4186b2..e917a6a095 100644 --- a/hw/misc/macio/cuda.c +++ b/hw/misc/macio/cuda.c @@ -553,8 +553,8 @@ static void cuda_init(Object *obj) memory_region_init_io(&s->mem, obj, &mos6522_cuda_ops, s, "cuda", 0x2000); sysbus_init_mmio(sbd, &s->mem); - qbus_create_inplace(&s->adb_bus, sizeof(s->adb_bus), TYPE_ADB_BUS, - DEVICE(obj), "adb.0"); + qbus_init(&s->adb_bus, sizeof(s->adb_bus), TYPE_ADB_BUS, + DEVICE(obj), "adb.0"); } static Property cuda_properties[] = { diff --git a/hw/misc/macio/macio.c b/hw/misc/macio/macio.c index 4515296e66..c1fad43f6c 100644 --- a/hw/misc/macio/macio.c +++ b/hw/misc/macio/macio.c @@ -387,8 +387,8 @@ static void macio_instance_init(Object *obj) memory_region_init(&s->bar, obj, "macio", 0x80000); - qbus_create_inplace(&s->macio_bus, sizeof(s->macio_bus), TYPE_MACIO_BUS, - DEVICE(obj), "macio.0"); + qbus_init(&s->macio_bus, sizeof(s->macio_bus), TYPE_MACIO_BUS, + DEVICE(obj), "macio.0"); object_initialize_child(OBJECT(s), "dbdma", &s->dbdma, TYPE_MAC_DBDMA); diff --git a/hw/misc/macio/pmu.c b/hw/misc/macio/pmu.c index 71924d4768..4ad4f50e08 100644 --- a/hw/misc/macio/pmu.c +++ b/hw/misc/macio/pmu.c @@ -754,8 +754,8 @@ static void pmu_realize(DeviceState *dev, Error **errp) timer_mod(s->one_sec_timer, s->one_sec_target); if (s->has_adb) { - qbus_create_inplace(&s->adb_bus, sizeof(s->adb_bus), TYPE_ADB_BUS, - dev, "adb.0"); + qbus_init(&s->adb_bus, sizeof(s->adb_bus), TYPE_ADB_BUS, + dev, "adb.0"); adb_register_autopoll_callback(adb_bus, pmu_adb_poll, s); } } diff --git a/hw/misc/sga.c b/hw/misc/sga.c index 4dbe6d78f9..1d04672b01 100644 --- a/hw/misc/sga.c +++ b/hw/misc/sga.c @@ -30,6 +30,7 @@ #include "hw/loader.h" #include "qemu/module.h" #include "qom/object.h" +#include "qemu/error-report.h" #define SGABIOS_FILENAME "sgabios.bin" @@ -42,6 +43,7 @@ struct ISASGAState { static void sga_realizefn(DeviceState *dev, Error **errp) { + warn_report("-device sga is deprecated, use -machine graphics=off"); rom_add_vga(SGABIOS_FILENAME); } diff --git a/hw/misc/trace-events b/hw/misc/trace-events index ede413965b..2da96d167a 100644 --- a/hw/misc/trace-events +++ b/hw/misc/trace-events @@ -228,6 +228,7 @@ via1_rtc_cmd_pram_sect_write(int sector, int offset, int addr, int value) "secto via1_adb_send(const char *state, uint8_t data, const char *vadbint) "state %s data=0x%02x vADBInt=%s" via1_adb_receive(const char *state, uint8_t data, const char *vadbint, int status, int index, int size) "state %s 
data=0x%02x vADBInt=%s status=0x%x index=%d size=%d" via1_adb_poll(uint8_t data, const char *vadbint, int status, int index, int size) "data=0x%02x vADBInt=%s status=0x%x index=%d size=%d" +via1_auxmode(int mode) "setting auxmode to %d" # grlib_ahb_apb_pnp.c grlib_ahb_pnp_read(uint64_t addr, uint32_t value) "AHB PnP read addr:0x%03"PRIx64" data:0x%08x" diff --git a/hw/misc/virt_ctrl.c b/hw/misc/virt_ctrl.c index 3552d7a09a..e75d1e7e17 100644 --- a/hw/misc/virt_ctrl.c +++ b/hw/misc/virt_ctrl.c @@ -1,5 +1,5 @@ /* - * SPDX-License-Identifer: GPL-2.0-or-later + * SPDX-License-Identifier: GPL-2.0-or-later * * Virt system Controller */ diff --git a/hw/net/e1000.c b/hw/net/e1000.c index a30546c5d5..f5bc81296d 100644 --- a/hw/net/e1000.c +++ b/hw/net/e1000.c @@ -107,6 +107,7 @@ struct E1000State_st { e1000x_txd_props props; e1000x_txd_props tso_props; uint16_t tso_frames; + bool busy; } tx; struct { @@ -763,6 +764,11 @@ start_xmit(E1000State *s) return; } + if (s->tx.busy) { + return; + } + s->tx.busy = true; + while (s->mac_reg[TDH] != s->mac_reg[TDT]) { base = tx_desc_base(s) + sizeof(struct e1000_tx_desc) * s->mac_reg[TDH]; @@ -789,6 +795,7 @@ start_xmit(E1000State *s) break; } } + s->tx.busy = false; set_ics(s, 0, cause); } diff --git a/hw/net/vhost_net-stub.c b/hw/net/vhost_net-stub.c index a7f4252630..89d71cfb8e 100644 --- a/hw/net/vhost_net-stub.c +++ b/hw/net/vhost_net-stub.c @@ -33,13 +33,13 @@ struct vhost_net *vhost_net_init(VhostNetOptions *options) int vhost_net_start(VirtIODevice *dev, NetClientState *ncs, - int total_queues) + int data_queue_pairs, int cvq) { return -ENOSYS; } void vhost_net_stop(VirtIODevice *dev, NetClientState *ncs, - int total_queues) + int data_queue_pairs, int cvq) { } diff --git a/hw/net/vhost_net.c b/hw/net/vhost_net.c index 386ec2eaa2..0d888f29a6 100644 --- a/hw/net/vhost_net.c +++ b/hw/net/vhost_net.c @@ -231,9 +231,11 @@ fail: return NULL; } -static void vhost_net_set_vq_index(struct vhost_net *net, int vq_index) +static void vhost_net_set_vq_index(struct vhost_net *net, int vq_index, + int last_index) { net->dev.vq_index = vq_index; + net->dev.last_index = last_index; } static int vhost_net_start_one(struct vhost_net *net, @@ -315,25 +317,37 @@ static void vhost_net_stop_one(struct vhost_net *net, } int vhost_net_start(VirtIODevice *dev, NetClientState *ncs, - int total_queues) + int data_queue_pairs, int cvq) { BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(dev))); VirtioBusState *vbus = VIRTIO_BUS(qbus); VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus); + int total_notifiers = data_queue_pairs * 2 + cvq; + VirtIONet *n = VIRTIO_NET(dev); + int nvhosts = data_queue_pairs + cvq; struct vhost_net *net; - int r, e, i; + int r, e, i, last_index = data_queue_pairs * 2; NetClientState *peer; + if (!cvq) { + last_index -= 1; + } + if (!k->set_guest_notifiers) { error_report("binding does not support guest notifiers"); return -ENOSYS; } - for (i = 0; i < total_queues; i++) { + for (i = 0; i < nvhosts; i++) { + + if (i < data_queue_pairs) { + peer = qemu_get_peer(ncs, i); + } else { /* Control Virtqueue */ + peer = qemu_get_peer(ncs, n->max_queue_pairs); + } - peer = qemu_get_peer(ncs, i); net = get_vhost_net(peer); - vhost_net_set_vq_index(net, i * 2); + vhost_net_set_vq_index(net, i * 2, last_index); /* Suppress the masking guest notifiers on vhost user * because vhost user doesn't interrupt masking/unmasking @@ -344,14 +358,18 @@ int vhost_net_start(VirtIODevice *dev, NetClientState *ncs, } } - r = k->set_guest_notifiers(qbus->parent, total_queues * 2, true); 
+ r = k->set_guest_notifiers(qbus->parent, total_notifiers, true); if (r < 0) { error_report("Error binding guest notifier: %d", -r); goto err; } - for (i = 0; i < total_queues; i++) { - peer = qemu_get_peer(ncs, i); + for (i = 0; i < nvhosts; i++) { + if (i < data_queue_pairs) { + peer = qemu_get_peer(ncs, i); + } else { + peer = qemu_get_peer(ncs, n->max_queue_pairs); + } r = vhost_net_start_one(get_vhost_net(peer), dev); if (r < 0) { @@ -375,7 +393,7 @@ err_start: peer = qemu_get_peer(ncs , i); vhost_net_stop_one(get_vhost_net(peer), dev); } - e = k->set_guest_notifiers(qbus->parent, total_queues * 2, false); + e = k->set_guest_notifiers(qbus->parent, total_notifiers, false); if (e < 0) { fprintf(stderr, "vhost guest notifier cleanup failed: %d\n", e); fflush(stderr); @@ -385,18 +403,27 @@ err: } void vhost_net_stop(VirtIODevice *dev, NetClientState *ncs, - int total_queues) + int data_queue_pairs, int cvq) { BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(dev))); VirtioBusState *vbus = VIRTIO_BUS(qbus); VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(vbus); + VirtIONet *n = VIRTIO_NET(dev); + NetClientState *peer; + int total_notifiers = data_queue_pairs * 2 + cvq; + int nvhosts = data_queue_pairs + cvq; int i, r; - for (i = 0; i < total_queues; i++) { - vhost_net_stop_one(get_vhost_net(ncs[i].peer), dev); + for (i = 0; i < nvhosts; i++) { + if (i < data_queue_pairs) { + peer = qemu_get_peer(ncs, i); + } else { + peer = qemu_get_peer(ncs, n->max_queue_pairs); + } + vhost_net_stop_one(get_vhost_net(peer), dev); } - r = k->set_guest_notifiers(qbus->parent, total_queues * 2, false); + r = k->set_guest_notifiers(qbus->parent, total_notifiers, false); if (r < 0) { fprintf(stderr, "vhost guest notifier cleanup failed: %d\n", r); fflush(stderr); diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c index f205331dcf..f2014d5ea0 100644 --- a/hw/net/virtio-net.c +++ b/hw/net/virtio-net.c @@ -54,7 +54,7 @@ #define VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE 256 #define VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE 256 -/* for now, only allow larger queues; with virtio-1, guest can downsize */ +/* for now, only allow larger queue_pairs; with virtio-1, guest can downsize */ #define VIRTIO_NET_RX_QUEUE_MIN_SIZE VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE #define VIRTIO_NET_TX_QUEUE_MIN_SIZE VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE @@ -131,7 +131,7 @@ static void virtio_net_get_config(VirtIODevice *vdev, uint8_t *config) int ret = 0; memset(&netcfg, 0 , sizeof(struct virtio_net_config)); virtio_stw_p(vdev, &netcfg.status, n->status); - virtio_stw_p(vdev, &netcfg.max_virtqueue_pairs, n->max_queues); + virtio_stw_p(vdev, &netcfg.max_virtqueue_pairs, n->max_queue_pairs); virtio_stw_p(vdev, &netcfg.mtu, n->net_conf.mtu); memcpy(netcfg.mac, n->mac, ETH_ALEN); virtio_stl_p(vdev, &netcfg.speed, n->net_conf.speed); @@ -243,7 +243,8 @@ static void virtio_net_vhost_status(VirtIONet *n, uint8_t status) { VirtIODevice *vdev = VIRTIO_DEVICE(n); NetClientState *nc = qemu_get_queue(n->nic); - int queues = n->multiqueue ? n->max_queues : 1; + int queue_pairs = n->multiqueue ? n->max_queue_pairs : 1; + int cvq = n->max_ncs - n->max_queue_pairs; if (!get_vhost_net(nc->peer)) { return; @@ -266,7 +267,7 @@ static void virtio_net_vhost_status(VirtIONet *n, uint8_t status) /* Any packets outstanding? Purge them to avoid touching rings * when vhost is running. */ - for (i = 0; i < queues; i++) { + for (i = 0; i < queue_pairs; i++) { NetClientState *qnc = qemu_get_subqueue(n->nic, i); /* Purge both directions: TX and RX. 
*/ @@ -285,14 +286,14 @@ static void virtio_net_vhost_status(VirtIONet *n, uint8_t status) } n->vhost_started = 1; - r = vhost_net_start(vdev, n->nic->ncs, queues); + r = vhost_net_start(vdev, n->nic->ncs, queue_pairs, cvq); if (r < 0) { error_report("unable to start vhost net: %d: " "falling back on userspace virtio", -r); n->vhost_started = 0; } } else { - vhost_net_stop(vdev, n->nic->ncs, queues); + vhost_net_stop(vdev, n->nic->ncs, queue_pairs, cvq); n->vhost_started = 0; } } @@ -309,11 +310,11 @@ static int virtio_net_set_vnet_endian_one(VirtIODevice *vdev, } static bool virtio_net_set_vnet_endian(VirtIODevice *vdev, NetClientState *ncs, - int queues, bool enable) + int queue_pairs, bool enable) { int i; - for (i = 0; i < queues; i++) { + for (i = 0; i < queue_pairs; i++) { if (virtio_net_set_vnet_endian_one(vdev, ncs[i].peer, enable) < 0 && enable) { while (--i >= 0) { @@ -330,7 +331,7 @@ static bool virtio_net_set_vnet_endian(VirtIODevice *vdev, NetClientState *ncs, static void virtio_net_vnet_endian_status(VirtIONet *n, uint8_t status) { VirtIODevice *vdev = VIRTIO_DEVICE(n); - int queues = n->multiqueue ? n->max_queues : 1; + int queue_pairs = n->multiqueue ? n->max_queue_pairs : 1; if (virtio_net_started(n, status)) { /* Before using the device, we tell the network backend about the @@ -339,14 +340,14 @@ static void virtio_net_vnet_endian_status(VirtIONet *n, uint8_t status) * virtio-net code. */ n->needs_vnet_hdr_swap = virtio_net_set_vnet_endian(vdev, n->nic->ncs, - queues, true); + queue_pairs, true); } else if (virtio_net_started(n, vdev->status)) { /* After using the device, we need to reset the network backend to * the default (guest native endianness), otherwise the guest may * lose network connectivity if it is rebooted into a different * endianness. 
*/ - virtio_net_set_vnet_endian(vdev, n->nic->ncs, queues, false); + virtio_net_set_vnet_endian(vdev, n->nic->ncs, queue_pairs, false); } } @@ -368,12 +369,12 @@ static void virtio_net_set_status(struct VirtIODevice *vdev, uint8_t status) virtio_net_vnet_endian_status(n, status); virtio_net_vhost_status(n, status); - for (i = 0; i < n->max_queues; i++) { + for (i = 0; i < n->max_queue_pairs; i++) { NetClientState *ncs = qemu_get_subqueue(n->nic, i); bool queue_started; q = &n->vqs[i]; - if ((!n->multiqueue && i != 0) || i >= n->curr_queues) { + if ((!n->multiqueue && i != 0) || i >= n->curr_queue_pairs) { queue_status = 0; } else { queue_status = status; @@ -540,7 +541,7 @@ static void virtio_net_reset(VirtIODevice *vdev) n->nouni = 0; n->nobcast = 0; /* multiqueue is disabled by default */ - n->curr_queues = 1; + n->curr_queue_pairs = 1; timer_del(n->announce_timer.tm); n->announce_timer.round = 0; n->status &= ~VIRTIO_NET_S_ANNOUNCE; @@ -556,7 +557,7 @@ static void virtio_net_reset(VirtIODevice *vdev) memset(n->vlans, 0, MAX_VLAN >> 3); /* Flush any async TX */ - for (i = 0; i < n->max_queues; i++) { + for (i = 0; i < n->max_queue_pairs; i++) { NetClientState *nc = qemu_get_subqueue(n->nic, i); if (nc->peer) { @@ -610,7 +611,7 @@ static void virtio_net_set_mrg_rx_bufs(VirtIONet *n, int mergeable_rx_bufs, sizeof(struct virtio_net_hdr); } - for (i = 0; i < n->max_queues; i++) { + for (i = 0; i < n->max_queue_pairs; i++) { nc = qemu_get_subqueue(n->nic, i); if (peer_has_vnet_hdr(n) && @@ -655,7 +656,7 @@ static int peer_attach(VirtIONet *n, int index) return 0; } - if (n->max_queues == 1) { + if (n->max_queue_pairs == 1) { return 0; } @@ -681,7 +682,7 @@ static int peer_detach(VirtIONet *n, int index) return tap_disable(nc->peer); } -static void virtio_net_set_queues(VirtIONet *n) +static void virtio_net_set_queue_pairs(VirtIONet *n) { int i; int r; @@ -690,8 +691,8 @@ static void virtio_net_set_queues(VirtIONet *n) return; } - for (i = 0; i < n->max_queues; i++) { - if (i < n->curr_queues) { + for (i = 0; i < n->max_queue_pairs; i++) { + if (i < n->curr_queue_pairs) { r = peer_attach(n, i); assert(!r); } else { @@ -796,48 +797,34 @@ static inline uint64_t virtio_net_supported_guest_offloads(VirtIONet *n) typedef struct { VirtIONet *n; - char *id; -} FailoverId; + DeviceState *dev; +} FailoverDevice; /** - * Set the id of the failover primary device + * Set the failover primary device * * @opaque: FailoverId to setup * @opts: opts for device we are handling * @errp: returns an error if this function fails */ -static int failover_set_primary(void *opaque, QemuOpts *opts, Error **errp) +static int failover_set_primary(DeviceState *dev, void *opaque) { - FailoverId *fid = opaque; - const char *standby_id = qemu_opt_get(opts, "failover_pair_id"); + FailoverDevice *fdev = opaque; + PCIDevice *pci_dev = (PCIDevice *) + object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE); - if (g_strcmp0(standby_id, fid->n->netclient_name) == 0) { - fid->id = g_strdup(opts->id); + if (!pci_dev) { + return 0; + } + + if (!g_strcmp0(pci_dev->failover_pair_id, fdev->n->netclient_name)) { + fdev->dev = dev; return 1; } return 0; } -/** - * Find the primary device id for this failover virtio-net - * - * @n: VirtIONet device - * @errp: returns an error if this function fails - */ -static char *failover_find_primary_device_id(VirtIONet *n) -{ - Error *err = NULL; - FailoverId fid; - - fid.n = n; - if (!qemu_opts_foreach(qemu_find_opts("device"), - failover_set_primary, &fid, &err)) { - return NULL; - } - return fid.id; -} 
- /** * Find the primary device for this failover virtio-net * @@ -846,39 +833,38 @@ static char *failover_find_primary_device_id(VirtIONet *n) */ static DeviceState *failover_find_primary_device(VirtIONet *n) { - char *id = failover_find_primary_device_id(n); + FailoverDevice fdev = { + .n = n, + }; - if (!id) { - return NULL; - } - - return qdev_find_recursive(sysbus_get_default(), id); + qbus_walk_children(sysbus_get_default(), failover_set_primary, NULL, + NULL, NULL, &fdev); + return fdev.dev; } static void failover_add_primary(VirtIONet *n, Error **errp) { Error *err = NULL; - QemuOpts *opts; - char *id; DeviceState *dev = failover_find_primary_device(n); if (dev) { return; } - id = failover_find_primary_device_id(n); - if (!id) { + if (!n->primary_opts) { error_setg(errp, "Primary device not found"); error_append_hint(errp, "Virtio-net failover will not work. Make " "sure primary device has parameter" " failover_pair_id=%s\n", n->netclient_name); return; } - opts = qemu_opts_find(qemu_find_opts("device"), id); - g_assert(opts); /* cannot be NULL because id was found using opts list */ - dev = qdev_device_add(opts, &err); + + dev = qdev_device_add_from_qdict(n->primary_opts, + n->primary_opts_from_json, + &err); if (err) { - qemu_opts_del(opts); + qobject_unref(n->primary_opts); + n->primary_opts = NULL; } else { object_unref(OBJECT(dev)); } @@ -920,7 +906,7 @@ static void virtio_net_set_features(VirtIODevice *vdev, uint64_t features) virtio_net_apply_guest_offloads(n); } - for (i = 0; i < n->max_queues; i++) { + for (i = 0; i < n->max_queue_pairs; i++) { NetClientState *nc = qemu_get_subqueue(n->nic, i); if (!get_vhost_net(nc->peer)) { @@ -1247,7 +1233,7 @@ static uint16_t virtio_net_handle_rss(VirtIONet *n, VirtIODevice *vdev = VIRTIO_DEVICE(n); struct virtio_net_rss_config cfg; size_t s, offset = 0, size_get; - uint16_t queues, i; + uint16_t queue_pairs, i; struct { uint16_t us; uint8_t b; @@ -1289,7 +1275,7 @@ static uint16_t virtio_net_handle_rss(VirtIONet *n, } n->rss_data.default_queue = do_rss ? virtio_lduw_p(vdev, &cfg.unclassified_queue) : 0; - if (n->rss_data.default_queue >= n->max_queues) { + if (n->rss_data.default_queue >= n->max_queue_pairs) { err_msg = "Invalid default queue"; err_value = n->rss_data.default_queue; goto error; @@ -1318,14 +1304,14 @@ static uint16_t virtio_net_handle_rss(VirtIONet *n, size_get = sizeof(temp); s = iov_to_buf(iov, iov_cnt, offset, &temp, size_get); if (s != size_get) { - err_msg = "Can't get queues"; + err_msg = "Can't get queue_pairs"; err_value = (uint32_t)s; goto error; } - queues = do_rss ? virtio_lduw_p(vdev, &temp.us) : n->curr_queues; - if (queues == 0 || queues > n->max_queues) { - err_msg = "Invalid number of queues"; - err_value = queues; + queue_pairs = do_rss ? 
virtio_lduw_p(vdev, &temp.us) : n->curr_queue_pairs; + if (queue_pairs == 0 || queue_pairs > n->max_queue_pairs) { + err_msg = "Invalid number of queue_pairs"; + err_value = queue_pairs; goto error; } if (temp.b > VIRTIO_NET_RSS_MAX_KEY_SIZE) { @@ -1340,7 +1326,7 @@ static uint16_t virtio_net_handle_rss(VirtIONet *n, } if (!temp.b && !n->rss_data.hash_types) { virtio_net_disable_rss(n); - return queues; + return queue_pairs; } offset += size_get; size_get = temp.b; @@ -1373,7 +1359,7 @@ static uint16_t virtio_net_handle_rss(VirtIONet *n, trace_virtio_net_rss_enable(n->rss_data.hash_types, n->rss_data.indirections_len, temp.b); - return queues; + return queue_pairs; error: trace_virtio_net_rss_error(err_msg, err_value); virtio_net_disable_rss(n); @@ -1384,15 +1370,15 @@ static int virtio_net_handle_mq(VirtIONet *n, uint8_t cmd, struct iovec *iov, unsigned int iov_cnt) { VirtIODevice *vdev = VIRTIO_DEVICE(n); - uint16_t queues; + uint16_t queue_pairs; virtio_net_disable_rss(n); if (cmd == VIRTIO_NET_CTRL_MQ_HASH_CONFIG) { - queues = virtio_net_handle_rss(n, iov, iov_cnt, false); - return queues ? VIRTIO_NET_OK : VIRTIO_NET_ERR; + queue_pairs = virtio_net_handle_rss(n, iov, iov_cnt, false); + return queue_pairs ? VIRTIO_NET_OK : VIRTIO_NET_ERR; } if (cmd == VIRTIO_NET_CTRL_MQ_RSS_CONFIG) { - queues = virtio_net_handle_rss(n, iov, iov_cnt, true); + queue_pairs = virtio_net_handle_rss(n, iov, iov_cnt, true); } else if (cmd == VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) { struct virtio_net_ctrl_mq mq; size_t s; @@ -1403,24 +1389,24 @@ static int virtio_net_handle_mq(VirtIONet *n, uint8_t cmd, if (s != sizeof(mq)) { return VIRTIO_NET_ERR; } - queues = virtio_lduw_p(vdev, &mq.virtqueue_pairs); + queue_pairs = virtio_lduw_p(vdev, &mq.virtqueue_pairs); } else { return VIRTIO_NET_ERR; } - if (queues < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN || - queues > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX || - queues > n->max_queues || + if (queue_pairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN || + queue_pairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX || + queue_pairs > n->max_queue_pairs || !n->multiqueue) { return VIRTIO_NET_ERR; } - n->curr_queues = queues; - /* stop the backend before changing the number of queues to avoid handling a + n->curr_queue_pairs = queue_pairs; + /* stop the backend before changing the number of queue_pairs to avoid handling a * disabled queue */ virtio_net_set_status(vdev, vdev->status); - virtio_net_set_queues(n); + virtio_net_set_queue_pairs(n); return VIRTIO_NET_OK; } @@ -1498,7 +1484,7 @@ static bool virtio_net_can_receive(NetClientState *nc) return false; } - if (nc->queue_index >= n->curr_queues) { + if (nc->queue_index >= n->curr_queue_pairs) { return false; } @@ -2778,11 +2764,11 @@ static void virtio_net_del_queue(VirtIONet *n, int index) virtio_del_queue(vdev, index * 2 + 1); } -static void virtio_net_change_num_queues(VirtIONet *n, int new_max_queues) +static void virtio_net_change_num_queue_pairs(VirtIONet *n, int new_max_queue_pairs) { VirtIODevice *vdev = VIRTIO_DEVICE(n); int old_num_queues = virtio_get_num_queues(vdev); - int new_num_queues = new_max_queues * 2 + 1; + int new_num_queues = new_max_queue_pairs * 2 + 1; int i; assert(old_num_queues >= 3); @@ -2815,12 +2801,12 @@ static void virtio_net_change_num_queues(VirtIONet *n, int new_max_queues) static void virtio_net_set_multiqueue(VirtIONet *n, int multiqueue) { - int max = multiqueue ? n->max_queues : 1; + int max = multiqueue ? 
n->max_queue_pairs : 1; n->multiqueue = multiqueue; - virtio_net_change_num_queues(n, max); + virtio_net_change_num_queue_pairs(n, max); - virtio_net_set_queues(n); + virtio_net_set_queue_pairs(n); } static int virtio_net_post_load_device(void *opaque, int version_id) @@ -2853,7 +2839,7 @@ static int virtio_net_post_load_device(void *opaque, int version_id) */ n->saved_guest_offloads = n->curr_guest_offloads; - virtio_net_set_queues(n); + virtio_net_set_queue_pairs(n); /* Find the first multicast entry in the saved MAC filter */ for (i = 0; i < n->mac_table.in_use; i++) { @@ -2866,7 +2852,7 @@ static int virtio_net_post_load_device(void *opaque, int version_id) /* nc.link_down can't be migrated, so infer link_down according * to link status bit in n->status */ link_down = (n->status & VIRTIO_NET_S_LINK_UP) == 0; - for (i = 0; i < n->max_queues; i++) { + for (i = 0; i < n->max_queue_pairs; i++) { qemu_get_subqueue(n->nic, i)->link_down = link_down; } @@ -2931,9 +2917,9 @@ static const VMStateDescription vmstate_virtio_net_queue_tx_waiting = { }, }; -static bool max_queues_gt_1(void *opaque, int version_id) +static bool max_queue_pairs_gt_1(void *opaque, int version_id) { - return VIRTIO_NET(opaque)->max_queues > 1; + return VIRTIO_NET(opaque)->max_queue_pairs > 1; } static bool has_ctrl_guest_offloads(void *opaque, int version_id) @@ -2958,13 +2944,13 @@ static bool mac_table_doesnt_fit(void *opaque, int version_id) struct VirtIONetMigTmp { VirtIONet *parent; VirtIONetQueue *vqs_1; - uint16_t curr_queues_1; + uint16_t curr_queue_pairs_1; uint8_t has_ufo; uint32_t has_vnet_hdr; }; /* The 2nd and subsequent tx_waiting flags are loaded later than - * the 1st entry in the queues and only if there's more than one + * the 1st entry in the queue_pairs and only if there's more than one * entry. We use the tmp mechanism to calculate a temporary * pointer and count and also validate the count. 
*/ @@ -2974,9 +2960,9 @@ static int virtio_net_tx_waiting_pre_save(void *opaque) struct VirtIONetMigTmp *tmp = opaque; tmp->vqs_1 = tmp->parent->vqs + 1; - tmp->curr_queues_1 = tmp->parent->curr_queues - 1; - if (tmp->parent->curr_queues == 0) { - tmp->curr_queues_1 = 0; + tmp->curr_queue_pairs_1 = tmp->parent->curr_queue_pairs - 1; + if (tmp->parent->curr_queue_pairs == 0) { + tmp->curr_queue_pairs_1 = 0; } return 0; @@ -2989,9 +2975,9 @@ static int virtio_net_tx_waiting_pre_load(void *opaque) /* Reuse the pointer setup from save */ virtio_net_tx_waiting_pre_save(opaque); - if (tmp->parent->curr_queues > tmp->parent->max_queues) { - error_report("virtio-net: curr_queues %x > max_queues %x", - tmp->parent->curr_queues, tmp->parent->max_queues); + if (tmp->parent->curr_queue_pairs > tmp->parent->max_queue_pairs) { + error_report("virtio-net: curr_queue_pairs %x > max_queue_pairs %x", + tmp->parent->curr_queue_pairs, tmp->parent->max_queue_pairs); return -EINVAL; } @@ -3005,7 +2991,7 @@ static const VMStateDescription vmstate_virtio_net_tx_waiting = { .pre_save = virtio_net_tx_waiting_pre_save, .fields = (VMStateField[]) { VMSTATE_STRUCT_VARRAY_POINTER_UINT16(vqs_1, struct VirtIONetMigTmp, - curr_queues_1, + curr_queue_pairs_1, vmstate_virtio_net_queue_tx_waiting, struct VirtIONetQueue), VMSTATE_END_OF_LIST() @@ -3147,9 +3133,9 @@ static const VMStateDescription vmstate_virtio_net_device = { VMSTATE_UINT8(nobcast, VirtIONet), VMSTATE_WITH_TMP(VirtIONet, struct VirtIONetMigTmp, vmstate_virtio_net_has_ufo), - VMSTATE_SINGLE_TEST(max_queues, VirtIONet, max_queues_gt_1, 0, + VMSTATE_SINGLE_TEST(max_queue_pairs, VirtIONet, max_queue_pairs_gt_1, 0, vmstate_info_uint16_equal, uint16_t), - VMSTATE_UINT16_TEST(curr_queues, VirtIONet, max_queues_gt_1), + VMSTATE_UINT16_TEST(curr_queue_pairs, VirtIONet, max_queue_pairs_gt_1), VMSTATE_WITH_TMP(VirtIONet, struct VirtIONetMigTmp, vmstate_virtio_net_tx_waiting), VMSTATE_UINT64_TEST(curr_guest_offloads, VirtIONet, @@ -3304,7 +3290,9 @@ static void virtio_net_migration_state_notifier(Notifier *notifier, void *data) } static bool failover_hide_primary_device(DeviceListener *listener, - QemuOpts *device_opts) + const QDict *device_opts, + bool from_json, + Error **errp) { VirtIONet *n = container_of(listener, VirtIONet, primary_listener); const char *standby_id; @@ -3312,11 +3300,42 @@ static bool failover_hide_primary_device(DeviceListener *listener, if (!device_opts) { return false; } - standby_id = qemu_opt_get(device_opts, "failover_pair_id"); + + if (!qdict_haskey(device_opts, "failover_pair_id")) { + return false; + } + + if (!qdict_haskey(device_opts, "id")) { + error_setg(errp, "Device with failover_pair_id needs to have id"); + return false; + } + + standby_id = qdict_get_str(device_opts, "failover_pair_id"); if (g_strcmp0(standby_id, n->netclient_name) != 0) { return false; } + /* + * The hide helper can be called several times for a given device. + * Check there is only one primary for a virtio-net device but + * don't duplicate the qdict several times if it's called for the same + * device. 
+ */ + if (n->primary_opts) { + const char *old, *new; + /* devices with failover_pair_id always have an id */ + old = qdict_get_str(n->primary_opts, "id"); + new = qdict_get_str(device_opts, "id"); + if (strcmp(old, new) != 0) { + error_setg(errp, "Cannot attach more than one primary device to " + "'%s': '%s' and '%s'", n->netclient_name, old, new); + return false; + } + } else { + n->primary_opts = qdict_clone_shallow(device_opts); + n->primary_opts_from_json = from_json; + } + /* failover_primary_hidden is set during feature negotiation */ return qatomic_read(&n->failover_primary_hidden); } @@ -3393,16 +3412,30 @@ static void virtio_net_device_realize(DeviceState *dev, Error **errp) return; } - n->max_queues = MAX(n->nic_conf.peers.queues, 1); - if (n->max_queues * 2 + 1 > VIRTIO_QUEUE_MAX) { - error_setg(errp, "Invalid number of queues (= %" PRIu32 "), " + n->max_ncs = MAX(n->nic_conf.peers.queues, 1); + + /* + * Figure out the datapath queue pairs since the backend could + * provide control queue via peers as well. + */ + if (n->nic_conf.peers.queues) { + for (i = 0; i < n->max_ncs; i++) { + if (n->nic_conf.peers.ncs[i]->is_datapath) { + ++n->max_queue_pairs; + } + } + } + n->max_queue_pairs = MAX(n->max_queue_pairs, 1); + + if (n->max_queue_pairs * 2 + 1 > VIRTIO_QUEUE_MAX) { + error_setg(errp, "Invalid number of queue pairs (= %" PRIu32 "), " "must be a positive integer less than %d.", - n->max_queues, (VIRTIO_QUEUE_MAX - 1) / 2); + n->max_queue_pairs, (VIRTIO_QUEUE_MAX - 1) / 2); virtio_cleanup(vdev); return; } - n->vqs = g_malloc0(sizeof(VirtIONetQueue) * n->max_queues); - n->curr_queues = 1; + n->vqs = g_malloc0(sizeof(VirtIONetQueue) * n->max_queue_pairs); + n->curr_queue_pairs = 1; n->tx_timeout = n->net_conf.txtimer; if (n->net_conf.tx && strcmp(n->net_conf.tx, "timer") @@ -3416,7 +3449,7 @@ static void virtio_net_device_realize(DeviceState *dev, Error **errp) n->net_conf.tx_queue_size = MIN(virtio_net_max_tx_queue_size(n), n->net_conf.tx_queue_size); - for (i = 0; i < n->max_queues; i++) { + for (i = 0; i < n->max_queue_pairs; i++) { virtio_net_add_queue(n, i); } @@ -3440,13 +3473,13 @@ static void virtio_net_device_realize(DeviceState *dev, Error **errp) object_get_typename(OBJECT(dev)), dev->id, n); } - for (i = 0; i < n->max_queues; i++) { + for (i = 0; i < n->max_queue_pairs; i++) { n->nic->ncs[i].do_not_pad = true; } peer_test_vnet_hdr(n); if (peer_has_vnet_hdr(n)) { - for (i = 0; i < n->max_queues; i++) { + for (i = 0; i < n->max_queue_pairs; i++) { qemu_using_vnet_hdr(qemu_get_subqueue(n->nic, i)->peer, true); } n->host_hdr_len = sizeof(struct virtio_net_hdr); @@ -3488,7 +3521,7 @@ static void virtio_net_device_unrealize(DeviceState *dev) { VirtIODevice *vdev = VIRTIO_DEVICE(dev); VirtIONet *n = VIRTIO_NET(dev); - int i, max_queues; + int i, max_queue_pairs; if (virtio_has_feature(n->host_features, VIRTIO_NET_F_RSS)) { virtio_net_unload_ebpf(n); @@ -3506,16 +3539,19 @@ static void virtio_net_device_unrealize(DeviceState *dev) g_free(n->vlans); if (n->failover) { + qobject_unref(n->primary_opts); device_listener_unregister(&n->primary_listener); remove_migration_state_change_notifier(&n->migration_state); + } else { + assert(n->primary_opts == NULL); } - max_queues = n->multiqueue ? n->max_queues : 1; - for (i = 0; i < max_queues; i++) { + max_queue_pairs = n->multiqueue ? 
n->max_queue_pairs : 1; + for (i = 0; i < max_queue_pairs; i++) { virtio_net_del_queue(n, i); } /* delete also control vq */ - virtio_del_queue(vdev, max_queues * 2); + virtio_del_queue(vdev, max_queue_pairs * 2); qemu_announce_timer_del(&n->announce_timer, false); g_free(n->vqs); qemu_del_nic(n->nic); diff --git a/hw/nubus/mac-nubus-bridge.c b/hw/nubus/mac-nubus-bridge.c index 7c329300b8..a0da5a8b2f 100644 --- a/hw/nubus/mac-nubus-bridge.c +++ b/hw/nubus/mac-nubus-bridge.c @@ -1,5 +1,7 @@ /* - * Copyright (c) 2013-2018 Laurent Vivier + * QEMU Macintosh Nubus + * + * Copyright (c) 2013-2018 Laurent Vivier * * This work is licensed under the terms of the GNU GPL, version 2 or later. * See the COPYING file in the top-level directory. @@ -13,13 +15,29 @@ static void mac_nubus_bridge_init(Object *obj) { - MacNubusState *s = MAC_NUBUS_BRIDGE(obj); + MacNubusBridge *s = MAC_NUBUS_BRIDGE(obj); + NubusBridge *nb = NUBUS_BRIDGE(obj); SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + NubusBus *bus = &nb->bus; - s->bus = NUBUS_BUS(qbus_create(TYPE_NUBUS_BUS, DEVICE(s), NULL)); + /* Macintosh only has slots 0x9 to 0xe available */ + bus->slot_available_mask = MAKE_64BIT_MASK(MAC_NUBUS_FIRST_SLOT, + MAC_NUBUS_SLOT_NB); - sysbus_init_mmio(sbd, &s->bus->super_slot_io); - sysbus_init_mmio(sbd, &s->bus->slot_io); + /* Aliases for slots 0x9 to 0xe */ + memory_region_init_alias(&s->super_slot_alias, obj, "super-slot-alias", + &bus->nubus_mr, + MAC_NUBUS_FIRST_SLOT * NUBUS_SUPER_SLOT_SIZE, + MAC_NUBUS_SLOT_NB * NUBUS_SUPER_SLOT_SIZE); + + memory_region_init_alias(&s->slot_alias, obj, "slot-alias", + &bus->nubus_mr, + NUBUS_SLOT_BASE + + MAC_NUBUS_FIRST_SLOT * NUBUS_SLOT_SIZE, + MAC_NUBUS_SLOT_NB * NUBUS_SLOT_SIZE); + + sysbus_init_mmio(sbd, &s->super_slot_alias); + sysbus_init_mmio(sbd, &s->slot_alias); } static void mac_nubus_bridge_class_init(ObjectClass *klass, void *data) @@ -33,7 +51,7 @@ static const TypeInfo mac_nubus_bridge_info = { .name = TYPE_MAC_NUBUS_BRIDGE, .parent = TYPE_NUBUS_BRIDGE, .instance_init = mac_nubus_bridge_init, - .instance_size = sizeof(MacNubusState), + .instance_size = sizeof(MacNubusBridge), .class_init = mac_nubus_bridge_class_init, }; diff --git a/hw/nubus/nubus-bridge.c b/hw/nubus/nubus-bridge.c index cd8c6a91eb..a42c86080f 100644 --- a/hw/nubus/nubus-bridge.c +++ b/hw/nubus/nubus-bridge.c @@ -1,5 +1,5 @@ /* - * QEMU Macintosh Nubus + * QEMU Nubus * * Copyright (c) 2013-2018 Laurent Vivier * @@ -12,17 +12,36 @@ #include "hw/sysbus.h" #include "hw/nubus/nubus.h" + +static void nubus_bridge_init(Object *obj) +{ + NubusBridge *s = NUBUS_BRIDGE(obj); + NubusBus *bus = &s->bus; + + qbus_init(bus, sizeof(s->bus), TYPE_NUBUS_BUS, DEVICE(s), NULL); + + qdev_init_gpio_out(DEVICE(s), bus->irqs, NUBUS_IRQS); +} + +static Property nubus_bridge_properties[] = { + DEFINE_PROP_UINT16("slot-available-mask", NubusBridge, + bus.slot_available_mask, 0xffff), + DEFINE_PROP_END_OF_LIST() +}; + static void nubus_bridge_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->fw_name = "nubus"; + device_class_set_props(dc, nubus_bridge_properties); } static const TypeInfo nubus_bridge_info = { .name = TYPE_NUBUS_BRIDGE, .parent = TYPE_SYS_BUS_DEVICE, - .instance_size = sizeof(SysBusDevice), + .instance_init = nubus_bridge_init, + .instance_size = sizeof(NubusBridge), .class_init = nubus_bridge_class_init, }; diff --git a/hw/nubus/nubus-bus.c b/hw/nubus/nubus-bus.c index 5c13452308..07c279bde5 100644 --- a/hw/nubus/nubus-bus.c +++ b/hw/nubus/nubus-bus.c @@ -8,9 +8,18 @@ * */ 
+/* + * References: + * Nubus Specification (TI) + * http://www.bitsavers.org/pdf/ti/nubus/2242825-0001_NuBus_Spec1983.pdf + * + * Designing Cards and Drivers for the Macintosh Family (Apple) + */ + #include "qemu/osdep.h" #include "hw/nubus/nubus.h" #include "qapi/error.h" +#include "trace.h" static NubusBus *nubus_find(void) @@ -19,72 +28,138 @@ static NubusBus *nubus_find(void) return NUBUS_BUS(object_resolve_path_type("", TYPE_NUBUS_BUS, NULL)); } -static void nubus_slot_write(void *opaque, hwaddr addr, uint64_t val, - unsigned int size) +static MemTxResult nubus_slot_write(void *opaque, hwaddr addr, uint64_t val, + unsigned size, MemTxAttrs attrs) { - /* read only */ + trace_nubus_slot_write(addr, val, size); + return MEMTX_DECODE_ERROR; } - -static uint64_t nubus_slot_read(void *opaque, hwaddr addr, - unsigned int size) +static MemTxResult nubus_slot_read(void *opaque, hwaddr addr, uint64_t *data, + unsigned size, MemTxAttrs attrs) { - return 0; + trace_nubus_slot_read(addr, size); + return MEMTX_DECODE_ERROR; } static const MemoryRegionOps nubus_slot_ops = { - .read = nubus_slot_read, - .write = nubus_slot_write, + .read_with_attrs = nubus_slot_read, + .write_with_attrs = nubus_slot_write, .endianness = DEVICE_BIG_ENDIAN, .valid = { .min_access_size = 1, - .max_access_size = 1, + .max_access_size = 4, }, }; -static void nubus_super_slot_write(void *opaque, hwaddr addr, uint64_t val, - unsigned int size) +static MemTxResult nubus_super_slot_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size, + MemTxAttrs attrs) { - /* read only */ + trace_nubus_super_slot_write(addr, val, size); + return MEMTX_DECODE_ERROR; } -static uint64_t nubus_super_slot_read(void *opaque, hwaddr addr, - unsigned int size) +static MemTxResult nubus_super_slot_read(void *opaque, hwaddr addr, + uint64_t *data, unsigned size, + MemTxAttrs attrs) { - return 0; + trace_nubus_super_slot_read(addr, size); + return MEMTX_DECODE_ERROR; } static const MemoryRegionOps nubus_super_slot_ops = { - .read = nubus_super_slot_read, - .write = nubus_super_slot_write, + .read_with_attrs = nubus_super_slot_read, + .write_with_attrs = nubus_super_slot_write, .endianness = DEVICE_BIG_ENDIAN, .valid = { .min_access_size = 1, - .max_access_size = 1, + .max_access_size = 4, }, }; +static void nubus_unrealize(BusState *bus) +{ + NubusBus *nubus = NUBUS_BUS(bus); + + address_space_destroy(&nubus->nubus_as); +} + static void nubus_realize(BusState *bus, Error **errp) { + NubusBus *nubus = NUBUS_BUS(bus); + if (!nubus_find()) { error_setg(errp, "at most one %s device is permitted", TYPE_NUBUS_BUS); return; } + + address_space_init(&nubus->nubus_as, &nubus->nubus_mr, "nubus"); } static void nubus_init(Object *obj) { NubusBus *nubus = NUBUS_BUS(obj); + memory_region_init(&nubus->nubus_mr, obj, "nubus", 0x100000000); + memory_region_init_io(&nubus->super_slot_io, obj, &nubus_super_slot_ops, nubus, "nubus-super-slots", - NUBUS_SUPER_SLOT_NB * NUBUS_SUPER_SLOT_SIZE); + (NUBUS_SUPER_SLOT_NB + 1) * NUBUS_SUPER_SLOT_SIZE); + memory_region_add_subregion(&nubus->nubus_mr, 0x0, &nubus->super_slot_io); memory_region_init_io(&nubus->slot_io, obj, &nubus_slot_ops, nubus, "nubus-slots", NUBUS_SLOT_NB * NUBUS_SLOT_SIZE); + memory_region_add_subregion(&nubus->nubus_mr, + (NUBUS_SUPER_SLOT_NB + 1) * + NUBUS_SUPER_SLOT_SIZE, &nubus->slot_io); - nubus->current_slot = NUBUS_FIRST_SLOT; + nubus->slot_available_mask = MAKE_64BIT_MASK(NUBUS_FIRST_SLOT, + NUBUS_SLOT_NB); +} + +static char *nubus_get_dev_path(DeviceState *dev) +{ + NubusDevice *nd = 
NUBUS_DEVICE(dev); + BusState *bus = qdev_get_parent_bus(dev); + char *p = qdev_get_dev_path(bus->parent); + + if (p) { + char *ret = g_strdup_printf("%s/%s/%02x", p, bus->name, nd->slot); + g_free(p); + return ret; + } else { + return g_strdup_printf("%s/%02x", bus->name, nd->slot); + } +} + +static bool nubus_check_address(BusState *bus, DeviceState *dev, Error **errp) +{ + NubusDevice *nd = NUBUS_DEVICE(dev); + NubusBus *nubus = NUBUS_BUS(bus); + + if (nd->slot == -1) { + /* No slot specified, find first available free slot */ + int s = ctz32(nubus->slot_available_mask); + if (s != 32) { + nd->slot = s; + } else { + error_setg(errp, "Cannot register nubus card, no free slot " + "available"); + return false; + } + } else { + /* Slot specified, make sure the slot is available */ + if (!(nubus->slot_available_mask & BIT(nd->slot))) { + error_setg(errp, "Cannot register nubus card, slot %d is " + "unavailable or already occupied", nd->slot); + return false; + } + } + + nubus->slot_available_mask &= ~BIT(nd->slot); + return true; } static void nubus_class_init(ObjectClass *oc, void *data) @@ -92,6 +167,9 @@ static void nubus_class_init(ObjectClass *oc, void *data) BusClass *bc = BUS_CLASS(oc); bc->realize = nubus_realize; + bc->unrealize = nubus_unrealize; + bc->check_address = nubus_check_address; + bc->get_dev_path = nubus_get_dev_path; } static const TypeInfo nubus_bus_info = { diff --git a/hw/nubus/nubus-device.c b/hw/nubus/nubus-device.c index ffe78a8823..0f1852f671 100644 --- a/hw/nubus/nubus-device.c +++ b/hw/nubus/nubus-device.c @@ -9,194 +9,99 @@ */ #include "qemu/osdep.h" +#include "qemu/datadir.h" +#include "hw/irq.h" +#include "hw/loader.h" #include "hw/nubus/nubus.h" #include "qapi/error.h" +#include "qemu/error-report.h" -/* The Format Block Structure */ - -#define FBLOCK_DIRECTORY_OFFSET 0 -#define FBLOCK_LENGTH 4 -#define FBLOCK_CRC 8 -#define FBLOCK_REVISION_LEVEL 12 -#define FBLOCK_FORMAT 13 -#define FBLOCK_TEST_PATTERN 14 -#define FBLOCK_RESERVED 18 -#define FBLOCK_BYTE_LANES 19 - -#define FBLOCK_SIZE 20 -#define FBLOCK_PATTERN_VAL 0x5a932bc7 - -static uint64_t nubus_fblock_read(void *opaque, hwaddr addr, unsigned int size) +void nubus_set_irq(NubusDevice *nd, int level) { - NubusDevice *dev = opaque; - uint64_t val; + NubusBus *nubus = NUBUS_BUS(qdev_get_parent_bus(DEVICE(nd))); -#define BYTE(v, b) (((v) >> (24 - 8 * (b))) & 0xff) - switch (addr) { - case FBLOCK_BYTE_LANES: - val = dev->byte_lanes; - val |= (val ^ 0xf) << 4; - break; - case FBLOCK_RESERVED: - val = 0x00; - break; - case FBLOCK_TEST_PATTERN...FBLOCK_TEST_PATTERN + 3: - val = BYTE(FBLOCK_PATTERN_VAL, addr - FBLOCK_TEST_PATTERN); - break; - case FBLOCK_FORMAT: - val = dev->rom_format; - break; - case FBLOCK_REVISION_LEVEL: - val = dev->rom_rev; - break; - case FBLOCK_CRC...FBLOCK_CRC + 3: - val = BYTE(dev->rom_crc, addr - FBLOCK_CRC); - break; - case FBLOCK_LENGTH...FBLOCK_LENGTH + 3: - val = BYTE(dev->rom_length, addr - FBLOCK_LENGTH); - break; - case FBLOCK_DIRECTORY_OFFSET...FBLOCK_DIRECTORY_OFFSET + 3: - val = BYTE(dev->directory_offset, addr - FBLOCK_DIRECTORY_OFFSET); - break; - default: - val = 0; - break; - } - return val; -} - -static void nubus_fblock_write(void *opaque, hwaddr addr, uint64_t val, - unsigned int size) -{ - /* read only */ -} - -static const MemoryRegionOps nubus_format_block_ops = { - .read = nubus_fblock_read, - .write = nubus_fblock_write, - .endianness = DEVICE_BIG_ENDIAN, - .valid = { - .min_access_size = 1, - .max_access_size = 1, - } -}; - -static void 
nubus_register_format_block(NubusDevice *dev) -{ - char *fblock_name; - - fblock_name = g_strdup_printf("nubus-slot-%d-format-block", - dev->slot_nb); - - hwaddr fblock_offset = memory_region_size(&dev->slot_mem) - FBLOCK_SIZE; - memory_region_init_io(&dev->fblock_io, NULL, &nubus_format_block_ops, - dev, fblock_name, FBLOCK_SIZE); - memory_region_add_subregion(&dev->slot_mem, fblock_offset, - &dev->fblock_io); - - g_free(fblock_name); -} - -static void mac_nubus_rom_write(void *opaque, hwaddr addr, uint64_t val, - unsigned int size) -{ - /* read only */ -} - -static uint64_t mac_nubus_rom_read(void *opaque, hwaddr addr, - unsigned int size) -{ - NubusDevice *dev = opaque; - - return dev->rom[addr]; -} - -static const MemoryRegionOps mac_nubus_rom_ops = { - .read = mac_nubus_rom_read, - .write = mac_nubus_rom_write, - .endianness = DEVICE_BIG_ENDIAN, - .valid = { - .min_access_size = 1, - .max_access_size = 1, - }, -}; - - -void nubus_register_rom(NubusDevice *dev, const uint8_t *rom, uint32_t size, - int revision, int format, uint8_t byte_lanes) -{ - hwaddr rom_offset; - char *rom_name; - - /* FIXME : really compute CRC */ - dev->rom_length = 0; - dev->rom_crc = 0; - - dev->rom_rev = revision; - dev->rom_format = format; - - dev->byte_lanes = byte_lanes; - dev->directory_offset = -size; - - /* ROM */ - - dev->rom = rom; - rom_name = g_strdup_printf("nubus-slot-%d-rom", dev->slot_nb); - memory_region_init_io(&dev->rom_io, NULL, &mac_nubus_rom_ops, - dev, rom_name, size); - memory_region_set_readonly(&dev->rom_io, true); - - rom_offset = memory_region_size(&dev->slot_mem) - FBLOCK_SIZE + - dev->directory_offset; - memory_region_add_subregion(&dev->slot_mem, rom_offset, &dev->rom_io); - - g_free(rom_name); + qemu_set_irq(nubus->irqs[nd->slot], level); } static void nubus_device_realize(DeviceState *dev, Error **errp) { NubusBus *nubus = NUBUS_BUS(qdev_get_parent_bus(dev)); NubusDevice *nd = NUBUS_DEVICE(dev); - char *name; + char *name, *path; hwaddr slot_offset; + int64_t size; + int ret; - if (nubus->current_slot < NUBUS_FIRST_SLOT || - nubus->current_slot > NUBUS_LAST_SLOT) { - error_setg(errp, "Cannot register nubus card, not enough slots"); - return; - } - - nd->slot_nb = nubus->current_slot++; - name = g_strdup_printf("nubus-slot-%d", nd->slot_nb); - - if (nd->slot_nb < NUBUS_FIRST_SLOT) { - /* Super */ - slot_offset = (nd->slot_nb - 6) * NUBUS_SUPER_SLOT_SIZE; - - memory_region_init(&nd->slot_mem, OBJECT(dev), name, - NUBUS_SUPER_SLOT_SIZE); - memory_region_add_subregion(&nubus->super_slot_io, slot_offset, - &nd->slot_mem); - } else { - /* Normal */ - slot_offset = nd->slot_nb * NUBUS_SLOT_SIZE; - - memory_region_init(&nd->slot_mem, OBJECT(dev), name, NUBUS_SLOT_SIZE); - memory_region_add_subregion(&nubus->slot_io, slot_offset, - &nd->slot_mem); - } + /* Super */ + slot_offset = nd->slot * NUBUS_SUPER_SLOT_SIZE; + name = g_strdup_printf("nubus-super-slot-%x", nd->slot); + memory_region_init(&nd->super_slot_mem, OBJECT(dev), name, + NUBUS_SUPER_SLOT_SIZE); + memory_region_add_subregion(&nubus->super_slot_io, slot_offset, + &nd->super_slot_mem); g_free(name); - nubus_register_format_block(nd); + + /* Normal */ + slot_offset = nd->slot * NUBUS_SLOT_SIZE; + + name = g_strdup_printf("nubus-slot-%x", nd->slot); + memory_region_init(&nd->slot_mem, OBJECT(dev), name, NUBUS_SLOT_SIZE); + memory_region_add_subregion(&nubus->slot_io, slot_offset, + &nd->slot_mem); + g_free(name); + + /* Declaration ROM */ + if (nd->romfile != NULL) { + path = qemu_find_file(QEMU_FILE_TYPE_BIOS, nd->romfile); + 
if (path == NULL) { + path = g_strdup(nd->romfile); + } + + size = get_image_size(path); + if (size < 0) { + error_setg(errp, "failed to find romfile \"%s\"", nd->romfile); + g_free(path); + return; + } else if (size == 0) { + error_setg(errp, "romfile \"%s\" is empty", nd->romfile); + g_free(path); + return; + } else if (size > NUBUS_DECL_ROM_MAX_SIZE) { + error_setg(errp, "romfile \"%s\" too large (maximum size 128K)", + nd->romfile); + g_free(path); + return; + } + + name = g_strdup_printf("nubus-slot-%x-declaration-rom", nd->slot); + memory_region_init_rom(&nd->decl_rom, OBJECT(dev), name, size, + &error_abort); + ret = load_image_mr(path, &nd->decl_rom); + g_free(path); + if (ret < 0) { + error_setg(errp, "could not load romfile \"%s\"", nd->romfile); + return; + } + memory_region_add_subregion(&nd->slot_mem, NUBUS_SLOT_SIZE - size, + &nd->decl_rom); + } } +static Property nubus_device_properties[] = { + DEFINE_PROP_INT32("slot", NubusDevice, slot, -1), + DEFINE_PROP_STRING("romfile", NubusDevice, romfile), + DEFINE_PROP_END_OF_LIST() +}; + static void nubus_device_class_init(ObjectClass *oc, void *data) { DeviceClass *dc = DEVICE_CLASS(oc); dc->realize = nubus_device_realize; dc->bus_type = TYPE_NUBUS_BUS; + device_class_set_props(dc, nubus_device_properties); } static const TypeInfo nubus_device_type_info = { diff --git a/hw/nubus/trace-events b/hw/nubus/trace-events new file mode 100644 index 0000000000..e31833d694 --- /dev/null +++ b/hw/nubus/trace-events @@ -0,0 +1,7 @@ +# See docs/devel/tracing.txt for syntax documentation. + +# nubus-bus.c +nubus_slot_read(uint64_t addr, int size) "reading unassigned addr 0x%"PRIx64 " size %d" +nubus_slot_write(uint64_t addr, uint64_t val, int size) "writing unassigned addr 0x%"PRIx64 " value 0x%"PRIx64 " size %d" +nubus_super_slot_read(uint64_t addr, int size) "reading unassigned addr 0x%"PRIx64 " size %d" +nubus_super_slot_write(uint64_t addr, uint64_t val, int size) "writing unassigned addr 0x%"PRIx64 " value 0x%"PRIx64 " size %d" diff --git a/hw/nubus/trace.h b/hw/nubus/trace.h new file mode 100644 index 0000000000..3749420da1 --- /dev/null +++ b/hw/nubus/trace.h @@ -0,0 +1 @@ +#include "trace/trace-hw_nubus.h" diff --git a/hw/nvme/ctrl.c b/hw/nvme/ctrl.c index 2f247a9275..6a571d18cf 100644 --- a/hw/nvme/ctrl.c +++ b/hw/nvme/ctrl.c @@ -6539,8 +6539,8 @@ static void nvme_realize(PCIDevice *pci_dev, Error **errp) return; } - qbus_create_inplace(&n->bus, sizeof(NvmeBus), TYPE_NVME_BUS, - &pci_dev->qdev, n->parent_obj.qdev.id); + qbus_init(&n->bus, sizeof(NvmeBus), TYPE_NVME_BUS, + &pci_dev->qdev, n->parent_obj.qdev.id); nvme_init_state(n); if (nvme_init_pci(n, pci_dev, errp)) { diff --git a/hw/nvme/subsys.c b/hw/nvme/subsys.c index 93c35950d6..495dcff5eb 100644 --- a/hw/nvme/subsys.c +++ b/hw/nvme/subsys.c @@ -50,8 +50,7 @@ static void nvme_subsys_realize(DeviceState *dev, Error **errp) { NvmeSubsystem *subsys = NVME_SUBSYS(dev); - qbus_create_inplace(&subsys->bus, sizeof(NvmeBus), TYPE_NVME_BUS, dev, - dev->id); + qbus_init(&subsys->bus, sizeof(NvmeBus), TYPE_NVME_BUS, dev, dev->id); nvme_subsys_setup(subsys); } diff --git a/hw/nvram/Kconfig b/hw/nvram/Kconfig index e872fcb194..24cfc18f8b 100644 --- a/hw/nvram/Kconfig +++ b/hw/nvram/Kconfig @@ -15,3 +15,22 @@ config NMC93XX_EEPROM config CHRP_NVRAM bool + +config XLNX_EFUSE_CRC + bool + +config XLNX_EFUSE + bool + select XLNX_EFUSE_CRC + +config XLNX_EFUSE_VERSAL + bool + select XLNX_EFUSE + +config XLNX_EFUSE_ZYNQMP + bool + select XLNX_EFUSE + +config XLNX_BBRAM + bool + select 
XLNX_EFUSE_CRC diff --git a/hw/nvram/meson.build b/hw/nvram/meson.build index fd2951a860..202a5466e6 100644 --- a/hw/nvram/meson.build +++ b/hw/nvram/meson.build @@ -9,5 +9,13 @@ softmmu_ss.add(when: 'CONFIG_AT24C', if_true: files('eeprom_at24c.c')) softmmu_ss.add(when: 'CONFIG_MAC_NVRAM', if_true: files('mac_nvram.c')) softmmu_ss.add(when: 'CONFIG_NPCM7XX', if_true: files('npcm7xx_otp.c')) softmmu_ss.add(when: 'CONFIG_NRF51_SOC', if_true: files('nrf51_nvm.c')) +softmmu_ss.add(when: 'CONFIG_XLNX_EFUSE_CRC', if_true: files('xlnx-efuse-crc.c')) +softmmu_ss.add(when: 'CONFIG_XLNX_EFUSE', if_true: files('xlnx-efuse.c')) +softmmu_ss.add(when: 'CONFIG_XLNX_EFUSE_VERSAL', if_true: files( + 'xlnx-versal-efuse-cache.c', + 'xlnx-versal-efuse-ctrl.c')) +softmmu_ss.add(when: 'CONFIG_XLNX_EFUSE_ZYNQMP', if_true: files( + 'xlnx-zynqmp-efuse.c')) +softmmu_ss.add(when: 'CONFIG_XLNX_BBRAM', if_true: files('xlnx-bbram.c')) specific_ss.add(when: 'CONFIG_PSERIES', if_true: files('spapr_nvram.c')) diff --git a/hw/nvram/xlnx-bbram.c b/hw/nvram/xlnx-bbram.c new file mode 100644 index 0000000000..b70828e5bf --- /dev/null +++ b/hw/nvram/xlnx-bbram.c @@ -0,0 +1,545 @@ +/* + * QEMU model of the Xilinx BBRAM Battery Backed RAM + * + * Copyright (c) 2014-2021 Xilinx Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include "qemu/osdep.h" +#include "hw/nvram/xlnx-bbram.h" + +#include "qemu/error-report.h" +#include "qemu/log.h" +#include "qapi/error.h" +#include "sysemu/blockdev.h" +#include "migration/vmstate.h" +#include "hw/qdev-properties.h" +#include "hw/qdev-properties-system.h" +#include "hw/nvram/xlnx-efuse.h" + +#ifndef XLNX_BBRAM_ERR_DEBUG +#define XLNX_BBRAM_ERR_DEBUG 0 +#endif + +REG32(BBRAM_STATUS, 0x0) + FIELD(BBRAM_STATUS, AES_CRC_PASS, 9, 1) + FIELD(BBRAM_STATUS, AES_CRC_DONE, 8, 1) + FIELD(BBRAM_STATUS, BBRAM_ZEROIZED, 4, 1) + FIELD(BBRAM_STATUS, PGM_MODE, 0, 1) +REG32(BBRAM_CTRL, 0x4) + FIELD(BBRAM_CTRL, ZEROIZE, 0, 1) +REG32(PGM_MODE, 0x8) +REG32(BBRAM_AES_CRC, 0xc) +REG32(BBRAM_0, 0x10) +REG32(BBRAM_1, 0x14) +REG32(BBRAM_2, 0x18) +REG32(BBRAM_3, 0x1c) +REG32(BBRAM_4, 0x20) +REG32(BBRAM_5, 0x24) +REG32(BBRAM_6, 0x28) +REG32(BBRAM_7, 0x2c) +REG32(BBRAM_8, 0x30) +REG32(BBRAM_SLVERR, 0x34) + FIELD(BBRAM_SLVERR, ENABLE, 0, 1) +REG32(BBRAM_ISR, 0x38) + FIELD(BBRAM_ISR, APB_SLVERR, 0, 1) +REG32(BBRAM_IMR, 0x3c) + FIELD(BBRAM_IMR, APB_SLVERR, 0, 1) +REG32(BBRAM_IER, 0x40) + FIELD(BBRAM_IER, APB_SLVERR, 0, 1) +REG32(BBRAM_IDR, 0x44) + FIELD(BBRAM_IDR, APB_SLVERR, 0, 1) +REG32(BBRAM_MSW_LOCK, 0x4c) + FIELD(BBRAM_MSW_LOCK, VAL, 0, 1) + +#define R_MAX (R_BBRAM_MSW_LOCK + 1) + +#define RAM_MAX (A_BBRAM_8 + 4 - A_BBRAM_0) + +#define BBRAM_PGM_MAGIC 0x757bdf0d + +QEMU_BUILD_BUG_ON(R_MAX != ARRAY_SIZE(((XlnxBBRam *)0)->regs)); + +static bool bbram_msw_locked(XlnxBBRam *s) +{ + return ARRAY_FIELD_EX32(s->regs, BBRAM_MSW_LOCK, VAL) != 0; +} + +static bool bbram_pgm_enabled(XlnxBBRam *s) +{ + return ARRAY_FIELD_EX32(s->regs, BBRAM_STATUS, PGM_MODE) != 0; +} + +static void bbram_bdrv_error(XlnxBBRam *s, int rc, gchar *detail) +{ + Error *errp; + + error_setg_errno(&errp, -rc, "%s: BBRAM backstore %s failed.", + blk_name(s->blk), detail); + error_report("%s", error_get_pretty(errp)); + error_free(errp); + + g_free(detail); +} + +static void bbram_bdrv_read(XlnxBBRam *s, Error **errp) +{ + uint32_t *ram = &s->regs[R_BBRAM_0]; + int nr = RAM_MAX; + + if (!s->blk) { + return; + } + + s->blk_ro = !blk_supports_write_perm(s->blk); + if (!s->blk_ro) { + int rc; + + rc = blk_set_perm(s->blk, + (BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE), + BLK_PERM_ALL, NULL); + if (rc) { + s->blk_ro = true; + } + } + if (s->blk_ro) { + warn_report("%s: Skip saving updates to read-only BBRAM backstore.", + blk_name(s->blk)); + } + + if (blk_pread(s->blk, 0, ram, nr) < 0) { + error_setg(errp, + "%s: Failed to read %u bytes from BBRAM backstore.", + blk_name(s->blk), nr); + return; + } + + /* Convert from little-endian backstore for each 32-bit word */ + nr /= 4; + while (nr--) { + ram[nr] = le32_to_cpu(ram[nr]); + } +} + +static void bbram_bdrv_sync(XlnxBBRam *s, uint64_t hwaddr) +{ + uint32_t le32; + unsigned offset; + int rc; + + assert(A_BBRAM_0 <= hwaddr && hwaddr <= A_BBRAM_8); + + /* Backstore is always in little-endian */ + le32 = cpu_to_le32(s->regs[hwaddr / 4]); + + /* Update zeroized flag */ + if (le32 && (hwaddr != A_BBRAM_8 || s->bbram8_wo)) { + ARRAY_FIELD_DP32(s->regs, BBRAM_STATUS, BBRAM_ZEROIZED, 0); + } + + if (!s->blk || s->blk_ro) { + return; + } + + offset = hwaddr - A_BBRAM_0; + rc = blk_pwrite(s->blk, offset, &le32, 4, 0); + if (rc < 0) { + bbram_bdrv_error(s, rc, g_strdup_printf("write to offset %u", offset)); + } +} + +static void bbram_bdrv_zero(XlnxBBRam *s) +{ + int rc; + + ARRAY_FIELD_DP32(s->regs, BBRAM_STATUS, BBRAM_ZEROIZED, 1); + + if (!s->blk || s->blk_ro) { + return; + } + + rc = 
blk_make_zero(s->blk, 0); + if (rc < 0) { + bbram_bdrv_error(s, rc, g_strdup("zeroizing")); + } + + /* Restore bbram8 if it is non-zero */ + if (s->regs[R_BBRAM_8]) { + bbram_bdrv_sync(s, A_BBRAM_8); + } +} + +static void bbram_zeroize(XlnxBBRam *s) +{ + int nr = RAM_MAX - (s->bbram8_wo ? 0 : 4); /* only wo bbram8 is cleared */ + + memset(&s->regs[R_BBRAM_0], 0, nr); + bbram_bdrv_zero(s); +} + +static void bbram_update_irq(XlnxBBRam *s) +{ + bool pending = s->regs[R_BBRAM_ISR] & ~s->regs[R_BBRAM_IMR]; + + qemu_set_irq(s->irq_bbram, pending); +} + +static void bbram_ctrl_postw(RegisterInfo *reg, uint64_t val64) +{ + XlnxBBRam *s = XLNX_BBRAM(reg->opaque); + uint32_t val = val64; + + if (val & R_BBRAM_CTRL_ZEROIZE_MASK) { + bbram_zeroize(s); + /* The bit is self clearing */ + s->regs[R_BBRAM_CTRL] &= ~R_BBRAM_CTRL_ZEROIZE_MASK; + } +} + +static void bbram_pgm_mode_postw(RegisterInfo *reg, uint64_t val64) +{ + XlnxBBRam *s = XLNX_BBRAM(reg->opaque); + uint32_t val = val64; + + if (val == BBRAM_PGM_MAGIC) { + bbram_zeroize(s); + + /* The status bit is cleared only by POR */ + ARRAY_FIELD_DP32(s->regs, BBRAM_STATUS, PGM_MODE, 1); + } +} + +static void bbram_aes_crc_postw(RegisterInfo *reg, uint64_t val64) +{ + XlnxBBRam *s = XLNX_BBRAM(reg->opaque); + uint32_t calc_crc; + + if (!bbram_pgm_enabled(s)) { + /* We are not in programming mode, don't do anything */ + return; + } + + /* Perform the AES integrity check */ + s->regs[R_BBRAM_STATUS] |= R_BBRAM_STATUS_AES_CRC_DONE_MASK; + + /* + * Set check status. + * + * ZynqMP BBRAM check has a zero-u32 prepended; see: + * https://github.com/Xilinx/embeddedsw/blob/release-2019.2/lib/sw_services/xilskey/src/xilskey_bbramps_zynqmp.c#L311 + */ + calc_crc = xlnx_efuse_calc_crc(&s->regs[R_BBRAM_0], + (R_BBRAM_8 - R_BBRAM_0), s->crc_zpads); + + ARRAY_FIELD_DP32(s->regs, BBRAM_STATUS, AES_CRC_PASS, + (s->regs[R_BBRAM_AES_CRC] == calc_crc)); +} + +static uint64_t bbram_key_prew(RegisterInfo *reg, uint64_t val64) +{ + XlnxBBRam *s = XLNX_BBRAM(reg->opaque); + uint32_t original_data = *(uint32_t *) reg->data; + + if (bbram_pgm_enabled(s)) { + return val64; + } else { + /* We are not in programming mode, don't do anything */ + qemu_log_mask(LOG_GUEST_ERROR, + "Not in programming mode, dropping the write\n"); + return original_data; + } +} + +static void bbram_key_postw(RegisterInfo *reg, uint64_t val64) +{ + XlnxBBRam *s = XLNX_BBRAM(reg->opaque); + + bbram_bdrv_sync(s, reg->access->addr); +} + +static uint64_t bbram_wo_postr(RegisterInfo *reg, uint64_t val) +{ + return 0; +} + +static uint64_t bbram_r8_postr(RegisterInfo *reg, uint64_t val) +{ + XlnxBBRam *s = XLNX_BBRAM(reg->opaque); + + return s->bbram8_wo ? 
bbram_wo_postr(reg, val) : val; +} + +static bool bbram_r8_readonly(XlnxBBRam *s) +{ + return !bbram_pgm_enabled(s) || bbram_msw_locked(s); +} + +static uint64_t bbram_r8_prew(RegisterInfo *reg, uint64_t val64) +{ + XlnxBBRam *s = XLNX_BBRAM(reg->opaque); + + if (bbram_r8_readonly(s)) { + val64 = *(uint32_t *)reg->data; + } + + return val64; +} + +static void bbram_r8_postw(RegisterInfo *reg, uint64_t val64) +{ + XlnxBBRam *s = XLNX_BBRAM(reg->opaque); + + if (!bbram_r8_readonly(s)) { + bbram_bdrv_sync(s, A_BBRAM_8); + } +} + +static uint64_t bbram_msw_lock_prew(RegisterInfo *reg, uint64_t val64) +{ + XlnxBBRam *s = XLNX_BBRAM(reg->opaque); + + /* Never lock if bbram8 is wo; and, only POR can clear the lock */ + if (s->bbram8_wo) { + val64 = 0; + } else { + val64 |= s->regs[R_BBRAM_MSW_LOCK]; + } + + return val64; +} + +static void bbram_isr_postw(RegisterInfo *reg, uint64_t val64) +{ + XlnxBBRam *s = XLNX_BBRAM(reg->opaque); + + bbram_update_irq(s); +} + +static uint64_t bbram_ier_prew(RegisterInfo *reg, uint64_t val64) +{ + XlnxBBRam *s = XLNX_BBRAM(reg->opaque); + uint32_t val = val64; + + s->regs[R_BBRAM_IMR] &= ~val; + bbram_update_irq(s); + return 0; +} + +static uint64_t bbram_idr_prew(RegisterInfo *reg, uint64_t val64) +{ + XlnxBBRam *s = XLNX_BBRAM(reg->opaque); + uint32_t val = val64; + + s->regs[R_BBRAM_IMR] |= val; + bbram_update_irq(s); + return 0; +} + +static RegisterAccessInfo bbram_ctrl_regs_info[] = { + { .name = "BBRAM_STATUS", .addr = A_BBRAM_STATUS, + .rsvd = 0xee, + .ro = 0x3ff, + },{ .name = "BBRAM_CTRL", .addr = A_BBRAM_CTRL, + .post_write = bbram_ctrl_postw, + },{ .name = "PGM_MODE", .addr = A_PGM_MODE, + .post_write = bbram_pgm_mode_postw, + },{ .name = "BBRAM_AES_CRC", .addr = A_BBRAM_AES_CRC, + .post_write = bbram_aes_crc_postw, + .post_read = bbram_wo_postr, + },{ .name = "BBRAM_0", .addr = A_BBRAM_0, + .pre_write = bbram_key_prew, + .post_write = bbram_key_postw, + .post_read = bbram_wo_postr, + },{ .name = "BBRAM_1", .addr = A_BBRAM_1, + .pre_write = bbram_key_prew, + .post_write = bbram_key_postw, + .post_read = bbram_wo_postr, + },{ .name = "BBRAM_2", .addr = A_BBRAM_2, + .pre_write = bbram_key_prew, + .post_write = bbram_key_postw, + .post_read = bbram_wo_postr, + },{ .name = "BBRAM_3", .addr = A_BBRAM_3, + .pre_write = bbram_key_prew, + .post_write = bbram_key_postw, + .post_read = bbram_wo_postr, + },{ .name = "BBRAM_4", .addr = A_BBRAM_4, + .pre_write = bbram_key_prew, + .post_write = bbram_key_postw, + .post_read = bbram_wo_postr, + },{ .name = "BBRAM_5", .addr = A_BBRAM_5, + .pre_write = bbram_key_prew, + .post_write = bbram_key_postw, + .post_read = bbram_wo_postr, + },{ .name = "BBRAM_6", .addr = A_BBRAM_6, + .pre_write = bbram_key_prew, + .post_write = bbram_key_postw, + .post_read = bbram_wo_postr, + },{ .name = "BBRAM_7", .addr = A_BBRAM_7, + .pre_write = bbram_key_prew, + .post_write = bbram_key_postw, + .post_read = bbram_wo_postr, + },{ .name = "BBRAM_8", .addr = A_BBRAM_8, + .pre_write = bbram_r8_prew, + .post_write = bbram_r8_postw, + .post_read = bbram_r8_postr, + },{ .name = "BBRAM_SLVERR", .addr = A_BBRAM_SLVERR, + .rsvd = ~1, + },{ .name = "BBRAM_ISR", .addr = A_BBRAM_ISR, + .w1c = 0x1, + .post_write = bbram_isr_postw, + },{ .name = "BBRAM_IMR", .addr = A_BBRAM_IMR, + .ro = 0x1, + },{ .name = "BBRAM_IER", .addr = A_BBRAM_IER, + .pre_write = bbram_ier_prew, + },{ .name = "BBRAM_IDR", .addr = A_BBRAM_IDR, + .pre_write = bbram_idr_prew, + },{ .name = "BBRAM_MSW_LOCK", .addr = A_BBRAM_MSW_LOCK, + .pre_write = bbram_msw_lock_prew, + .ro = 
~R_BBRAM_MSW_LOCK_VAL_MASK, + } +}; + +static void bbram_ctrl_reset(DeviceState *dev) +{ + XlnxBBRam *s = XLNX_BBRAM(dev); + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(s->regs_info); ++i) { + if (i < R_BBRAM_0 || i > R_BBRAM_8) { + register_reset(&s->regs_info[i]); + } + } + + bbram_update_irq(s); +} + +static const MemoryRegionOps bbram_ctrl_ops = { + .read = register_read_memory, + .write = register_write_memory, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + }, +}; + +static void bbram_ctrl_realize(DeviceState *dev, Error **errp) +{ + XlnxBBRam *s = XLNX_BBRAM(dev); + + if (s->crc_zpads) { + s->bbram8_wo = true; + } + + bbram_bdrv_read(s, errp); +} + +static void bbram_ctrl_init(Object *obj) +{ + XlnxBBRam *s = XLNX_BBRAM(obj); + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + RegisterInfoArray *reg_array; + + reg_array = + register_init_block32(DEVICE(obj), bbram_ctrl_regs_info, + ARRAY_SIZE(bbram_ctrl_regs_info), + s->regs_info, s->regs, + &bbram_ctrl_ops, + XLNX_BBRAM_ERR_DEBUG, + R_MAX * 4); + + sysbus_init_mmio(sbd, ®_array->mem); + sysbus_init_irq(sbd, &s->irq_bbram); +} + +static void bbram_prop_set_drive(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + DeviceState *dev = DEVICE(obj); + + qdev_prop_drive.set(obj, v, name, opaque, errp); + + /* Fill initial data if backend is attached after realized */ + if (dev->realized) { + bbram_bdrv_read(XLNX_BBRAM(obj), errp); + } +} + +static void bbram_prop_get_drive(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + qdev_prop_drive.get(obj, v, name, opaque, errp); +} + +static void bbram_prop_release_drive(Object *obj, const char *name, + void *opaque) +{ + qdev_prop_drive.release(obj, name, opaque); +} + +static const PropertyInfo bbram_prop_drive = { + .name = "str", + .description = "Node name or ID of a block device to use as BBRAM backend", + .realized_set_allowed = true, + .get = bbram_prop_get_drive, + .set = bbram_prop_set_drive, + .release = bbram_prop_release_drive, +}; + +static const VMStateDescription vmstate_bbram_ctrl = { + .name = TYPE_XLNX_BBRAM, + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32_ARRAY(regs, XlnxBBRam, R_MAX), + VMSTATE_END_OF_LIST(), + } +}; + +static Property bbram_ctrl_props[] = { + DEFINE_PROP("drive", XlnxBBRam, blk, bbram_prop_drive, BlockBackend *), + DEFINE_PROP_UINT32("crc-zpads", XlnxBBRam, crc_zpads, 1), + DEFINE_PROP_END_OF_LIST(), +}; + +static void bbram_ctrl_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->reset = bbram_ctrl_reset; + dc->realize = bbram_ctrl_realize; + dc->vmsd = &vmstate_bbram_ctrl; + device_class_set_props(dc, bbram_ctrl_props); +} + +static const TypeInfo bbram_ctrl_info = { + .name = TYPE_XLNX_BBRAM, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(XlnxBBRam), + .class_init = bbram_ctrl_class_init, + .instance_init = bbram_ctrl_init, +}; + +static void bbram_ctrl_register_types(void) +{ + type_register_static(&bbram_ctrl_info); +} + +type_init(bbram_ctrl_register_types) diff --git a/hw/nvram/xlnx-efuse-crc.c b/hw/nvram/xlnx-efuse-crc.c new file mode 100644 index 0000000000..5a5cc13f39 --- /dev/null +++ b/hw/nvram/xlnx-efuse-crc.c @@ -0,0 +1,119 @@ +/* + * Xilinx eFuse/bbram CRC calculator + * + * Copyright (c) 2021 Xilinx Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ +#include "qemu/osdep.h" +#include "hw/nvram/xlnx-efuse.h" + +static uint32_t xlnx_efuse_u37_crc(uint32_t prev_crc, uint32_t data, + uint32_t addr) +{ + /* A table for 7-bit slicing */ + static const uint32_t crc_tab[128] = { + 0x00000000, 0xe13b70f7, 0xc79a971f, 0x26a1e7e8, + 0x8ad958cf, 0x6be22838, 0x4d43cfd0, 0xac78bf27, + 0x105ec76f, 0xf165b798, 0xd7c45070, 0x36ff2087, + 0x9a879fa0, 0x7bbcef57, 0x5d1d08bf, 0xbc267848, + 0x20bd8ede, 0xc186fe29, 0xe72719c1, 0x061c6936, + 0xaa64d611, 0x4b5fa6e6, 0x6dfe410e, 0x8cc531f9, + 0x30e349b1, 0xd1d83946, 0xf779deae, 0x1642ae59, + 0xba3a117e, 0x5b016189, 0x7da08661, 0x9c9bf696, + 0x417b1dbc, 0xa0406d4b, 0x86e18aa3, 0x67dafa54, + 0xcba24573, 0x2a993584, 0x0c38d26c, 0xed03a29b, + 0x5125dad3, 0xb01eaa24, 0x96bf4dcc, 0x77843d3b, + 0xdbfc821c, 0x3ac7f2eb, 0x1c661503, 0xfd5d65f4, + 0x61c69362, 0x80fde395, 0xa65c047d, 0x4767748a, + 0xeb1fcbad, 0x0a24bb5a, 0x2c855cb2, 0xcdbe2c45, + 0x7198540d, 0x90a324fa, 0xb602c312, 0x5739b3e5, + 0xfb410cc2, 0x1a7a7c35, 0x3cdb9bdd, 0xdde0eb2a, + 0x82f63b78, 0x63cd4b8f, 0x456cac67, 0xa457dc90, + 0x082f63b7, 0xe9141340, 0xcfb5f4a8, 0x2e8e845f, + 0x92a8fc17, 0x73938ce0, 0x55326b08, 0xb4091bff, + 0x1871a4d8, 0xf94ad42f, 0xdfeb33c7, 0x3ed04330, + 0xa24bb5a6, 0x4370c551, 0x65d122b9, 0x84ea524e, + 0x2892ed69, 0xc9a99d9e, 0xef087a76, 0x0e330a81, + 0xb21572c9, 0x532e023e, 0x758fe5d6, 0x94b49521, + 0x38cc2a06, 0xd9f75af1, 0xff56bd19, 0x1e6dcdee, + 0xc38d26c4, 0x22b65633, 0x0417b1db, 0xe52cc12c, + 0x49547e0b, 0xa86f0efc, 0x8ecee914, 0x6ff599e3, + 0xd3d3e1ab, 0x32e8915c, 0x144976b4, 0xf5720643, + 0x590ab964, 0xb831c993, 0x9e902e7b, 0x7fab5e8c, + 0xe330a81a, 0x020bd8ed, 0x24aa3f05, 0xc5914ff2, + 0x69e9f0d5, 0x88d28022, 0xae7367ca, 0x4f48173d, + 0xf36e6f75, 0x12551f82, 0x34f4f86a, 0xd5cf889d, + 0x79b737ba, 0x988c474d, 0xbe2da0a5, 0x5f16d052 + }; + + /* + * eFuse calculation is shown here: + * https://github.com/Xilinx/embeddedsw/blob/release-2019.2/lib/sw_services/xilskey/src/xilskey_utils.c#L1496 + * + * Each u32 word is appended a 5-bit value, for a total of 37 bits; see: + * https://github.com/Xilinx/embeddedsw/blob/release-2019.2/lib/sw_services/xilskey/src/xilskey_utils.c#L1356 + */ + uint32_t crc = prev_crc; + const unsigned rshf = 7; + const uint32_t im = (1 << rshf) - 1; + const uint32_t rm = (1 << (32 - rshf)) - 1; + const uint32_t i2 = (1 << 2) - 1; + const uint32_t r2 = (1 << 30) - 1; + + unsigned j; + uint32_t i, r; + uint64_t w; + + w = 
(uint64_t)(addr) << 32; + w |= data; + + /* Feed 35 bits, in 5 rounds, each a slice of 7 bits */ + for (j = 0; j < 5; j++) { + r = rm & (crc >> rshf); + i = im & (crc ^ w); + crc = crc_tab[i] ^ r; + + w >>= rshf; + } + + /* Feed the remaining 2 bits */ + r = r2 & (crc >> 2); + i = i2 & (crc ^ w); + crc = crc_tab[i << (rshf - 2)] ^ r; + + return crc; +} + +uint32_t xlnx_efuse_calc_crc(const uint32_t *data, unsigned u32_cnt, + unsigned zpads) +{ + uint32_t crc = 0; + unsigned index; + + for (index = zpads; index; index--) { + crc = xlnx_efuse_u37_crc(crc, 0, (index + u32_cnt)); + } + + for (index = u32_cnt; index; index--) { + crc = xlnx_efuse_u37_crc(crc, data[index - 1], index); + } + + return crc; +} diff --git a/hw/nvram/xlnx-efuse.c b/hw/nvram/xlnx-efuse.c new file mode 100644 index 0000000000..a0fd77b586 --- /dev/null +++ b/hw/nvram/xlnx-efuse.c @@ -0,0 +1,283 @@ +/* + * QEMU model of the EFUSE eFuse + * + * Copyright (c) 2015 Xilinx Inc. + * + * Written by Edgar E. Iglesias + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include "qemu/osdep.h" +#include "hw/nvram/xlnx-efuse.h" + +#include "qemu/error-report.h" +#include "qemu/log.h" +#include "qapi/error.h" +#include "sysemu/blockdev.h" +#include "hw/qdev-properties.h" +#include "hw/qdev-properties-system.h" + +#define TBIT0_OFFSET 28 +#define TBIT1_OFFSET 29 +#define TBIT2_OFFSET 30 +#define TBIT3_OFFSET 31 +#define TBITS_PATTERN (0x0AU << TBIT0_OFFSET) +#define TBITS_MASK (0x0FU << TBIT0_OFFSET) + +bool xlnx_efuse_get_bit(XlnxEFuse *s, unsigned int bit) +{ + bool b = s->fuse32[bit / 32] & (1 << (bit % 32)); + return b; +} + +static int efuse_bytes(XlnxEFuse *s) +{ + return ROUND_UP((s->efuse_nr * s->efuse_size) / 8, 4); +} + +static int efuse_bdrv_read(XlnxEFuse *s, Error **errp) +{ + uint32_t *ram = s->fuse32; + int nr = efuse_bytes(s); + + if (!s->blk) { + return 0; + } + + s->blk_ro = !blk_supports_write_perm(s->blk); + if (!s->blk_ro) { + int rc; + + rc = blk_set_perm(s->blk, + (BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE), + BLK_PERM_ALL, NULL); + if (rc) { + s->blk_ro = true; + } + } + if (s->blk_ro) { + warn_report("%s: Skip saving updates to read-only eFUSE backstore.", + blk_name(s->blk)); + } + + if (blk_pread(s->blk, 0, ram, nr) < 0) { + error_setg(errp, "%s: Failed to read %u bytes from eFUSE backstore.", + blk_name(s->blk), nr); + return -1; + } + + /* Convert from little-endian backstore for each 32-bit row */ + nr /= 4; + while (nr--) { + ram[nr] = le32_to_cpu(ram[nr]); + } + + return 0; +} + +static void efuse_bdrv_sync(XlnxEFuse *s, unsigned int bit) +{ + unsigned int row_offset; + uint32_t le32; + + if (!s->blk || s->blk_ro) { + return; /* Silent on read-only backend to avoid message flood */ + } + + /* Backstore is always in little-endian */ + le32 = cpu_to_le32(xlnx_efuse_get_row(s, bit)); + + row_offset = (bit / 32) * 4; + if (blk_pwrite(s->blk, row_offset, &le32, 4, 0) < 0) { + error_report("%s: Failed to write offset %u of eFUSE backstore.", + blk_name(s->blk), row_offset); + } +} + +static int efuse_ro_bits_cmp(const void *a, const void *b) +{ + uint32_t i = *(const uint32_t *)a; + uint32_t j = *(const uint32_t *)b; + + return (i > j) - (i < j); +} + +static void efuse_ro_bits_sort(XlnxEFuse *s) +{ + uint32_t *ary = s->ro_bits; + const uint32_t cnt = s->ro_bits_cnt; + + if (ary && cnt > 1) { + qsort(ary, cnt, sizeof(ary[0]), efuse_ro_bits_cmp); + } +} + +static bool efuse_ro_bits_find(XlnxEFuse *s, uint32_t k) +{ + const uint32_t *ary = s->ro_bits; + const uint32_t cnt = s->ro_bits_cnt; + + if (!ary || !cnt) { + return false; + } + + return bsearch(&k, ary, cnt, sizeof(ary[0]), efuse_ro_bits_cmp) != NULL; +} + +bool xlnx_efuse_set_bit(XlnxEFuse *s, unsigned int bit) +{ + if (efuse_ro_bits_find(s, bit)) { + g_autofree char *path = object_get_canonical_path(OBJECT(s)); + + qemu_log_mask(LOG_GUEST_ERROR, "%s: WARN: " + "Ignored setting of readonly efuse bit<%u,%u>!\n", + path, (bit / 32), (bit % 32)); + return false; + } + + s->fuse32[bit / 32] |= 1 << (bit % 32); + efuse_bdrv_sync(s, bit); + return true; +} + +bool xlnx_efuse_k256_check(XlnxEFuse *s, uint32_t crc, unsigned start) +{ + uint32_t calc; + + /* A key always occupies multiple of whole rows */ + assert((start % 32) == 0); + + calc = xlnx_efuse_calc_crc(&s->fuse32[start / 32], (256 / 32), 0); + return calc == crc; +} + +uint32_t xlnx_efuse_tbits_check(XlnxEFuse *s) +{ + int nr; + uint32_t check = 0; + + for (nr = s->efuse_nr; nr-- > 0; ) { + int efuse_start_row_num = (s->efuse_size * nr) / 32; + uint32_t data = s->fuse32[efuse_start_row_num]; + + /* + * If the option 
is on, auto-init blank T-bits. + * (non-blank will still be reported as '0' in the check, e.g., + * for error-injection tests) + */ + if ((data & TBITS_MASK) == 0 && s->init_tbits) { + data |= TBITS_PATTERN; + + s->fuse32[efuse_start_row_num] = data; + efuse_bdrv_sync(s, (efuse_start_row_num * 32 + TBIT0_OFFSET)); + } + + check = (check << 1) | ((data & TBITS_MASK) == TBITS_PATTERN); + } + + return check; +} + +static void efuse_realize(DeviceState *dev, Error **errp) +{ + XlnxEFuse *s = XLNX_EFUSE(dev); + + /* Sort readonly-list for bsearch lookup */ + efuse_ro_bits_sort(s); + + if ((s->efuse_size % 32) != 0) { + g_autofree char *path = object_get_canonical_path(OBJECT(s)); + + error_setg(errp, + "%s.efuse-size: %u: property value not multiple of 32.", + path, s->efuse_size); + return; + } + + s->fuse32 = g_malloc0(efuse_bytes(s)); + if (efuse_bdrv_read(s, errp)) { + g_free(s->fuse32); + } +} + +static void efuse_prop_set_drive(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + DeviceState *dev = DEVICE(obj); + + qdev_prop_drive.set(obj, v, name, opaque, errp); + + /* Fill initial data if backend is attached after realized */ + if (dev->realized) { + efuse_bdrv_read(XLNX_EFUSE(obj), errp); + } +} + +static void efuse_prop_get_drive(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + qdev_prop_drive.get(obj, v, name, opaque, errp); +} + +static void efuse_prop_release_drive(Object *obj, const char *name, + void *opaque) +{ + qdev_prop_drive.release(obj, name, opaque); +} + +static const PropertyInfo efuse_prop_drive = { + .name = "str", + .description = "Node name or ID of a block device to use as eFUSE backend", + .realized_set_allowed = true, + .get = efuse_prop_get_drive, + .set = efuse_prop_set_drive, + .release = efuse_prop_release_drive, +}; + +static Property efuse_properties[] = { + DEFINE_PROP("drive", XlnxEFuse, blk, efuse_prop_drive, BlockBackend *), + DEFINE_PROP_UINT8("efuse-nr", XlnxEFuse, efuse_nr, 3), + DEFINE_PROP_UINT32("efuse-size", XlnxEFuse, efuse_size, 64 * 32), + DEFINE_PROP_BOOL("init-factory-tbits", XlnxEFuse, init_tbits, true), + DEFINE_PROP_ARRAY("read-only", XlnxEFuse, ro_bits_cnt, ro_bits, + qdev_prop_uint32, uint32_t), + DEFINE_PROP_END_OF_LIST(), +}; + +static void efuse_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->realize = efuse_realize; + device_class_set_props(dc, efuse_properties); +} + +static const TypeInfo efuse_info = { + .name = TYPE_XLNX_EFUSE, + .parent = TYPE_DEVICE, + .instance_size = sizeof(XlnxEFuse), + .class_init = efuse_class_init, +}; + +static void efuse_register_types(void) +{ + type_register_static(&efuse_info); +} +type_init(efuse_register_types) diff --git a/hw/nvram/xlnx-versal-efuse-cache.c b/hw/nvram/xlnx-versal-efuse-cache.c new file mode 100644 index 0000000000..eaec64d785 --- /dev/null +++ b/hw/nvram/xlnx-versal-efuse-cache.c @@ -0,0 +1,114 @@ +/* + * QEMU model of the EFuse_Cache + * + * Copyright (c) 2017 Xilinx Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "hw/nvram/xlnx-versal-efuse.h" + +#include "qemu/log.h" +#include "hw/qdev-properties.h" + +#define MR_SIZE 0xC00 + +static uint64_t efuse_cache_read(void *opaque, hwaddr addr, unsigned size) +{ + XlnxVersalEFuseCache *s = XLNX_VERSAL_EFUSE_CACHE(opaque); + unsigned int w0 = QEMU_ALIGN_DOWN(addr * 8, 32); + unsigned int w1 = QEMU_ALIGN_DOWN((addr + size - 1) * 8, 32); + + uint64_t ret; + + assert(w0 == w1 || (w0 + 32) == w1); + + ret = xlnx_versal_efuse_read_row(s->efuse, w1, NULL); + if (w0 < w1) { + ret <<= 32; + ret |= xlnx_versal_efuse_read_row(s->efuse, w0, NULL); + } + + /* If 'addr' unaligned, the guest is always assumed to be little-endian. 
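+     * Worked example (assumed values): a 2-byte read at cache offset 0x6
+     * gives w0 == w1 == 32, so a single 32-bit row is fetched and then,
+     * since addr & 3 == 2, shifted right by 16 bits, leaving bits [31:16]
+     * of that row in the low half of the returned value.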
*/ + addr &= 3; + if (addr) { + ret >>= 8 * addr; + } + + return ret; +} + +static void efuse_cache_write(void *opaque, hwaddr addr, uint64_t value, + unsigned size) +{ + /* No Register Writes allowed */ + qemu_log_mask(LOG_GUEST_ERROR, "%s: efuse cache registers are read-only", + __func__); +} + +static const MemoryRegionOps efuse_cache_ops = { + .read = efuse_cache_read, + .write = efuse_cache_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = { + .min_access_size = 1, + .max_access_size = 4, + }, +}; + +static void efuse_cache_init(Object *obj) +{ + XlnxVersalEFuseCache *s = XLNX_VERSAL_EFUSE_CACHE(obj); + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + + memory_region_init_io(&s->iomem, obj, &efuse_cache_ops, s, + TYPE_XLNX_VERSAL_EFUSE_CACHE, MR_SIZE); + sysbus_init_mmio(sbd, &s->iomem); +} + +static Property efuse_cache_props[] = { + DEFINE_PROP_LINK("efuse", + XlnxVersalEFuseCache, efuse, + TYPE_XLNX_EFUSE, XlnxEFuse *), + + DEFINE_PROP_END_OF_LIST(), +}; + +static void efuse_cache_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + device_class_set_props(dc, efuse_cache_props); +} + +static const TypeInfo efuse_cache_info = { + .name = TYPE_XLNX_VERSAL_EFUSE_CACHE, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(XlnxVersalEFuseCache), + .class_init = efuse_cache_class_init, + .instance_init = efuse_cache_init, +}; + +static void efuse_cache_register_types(void) +{ + type_register_static(&efuse_cache_info); +} + +type_init(efuse_cache_register_types) diff --git a/hw/nvram/xlnx-versal-efuse-ctrl.c b/hw/nvram/xlnx-versal-efuse-ctrl.c new file mode 100644 index 0000000000..b35ba65ab5 --- /dev/null +++ b/hw/nvram/xlnx-versal-efuse-ctrl.c @@ -0,0 +1,793 @@ +/* + * QEMU model of the Versal eFuse controller + * + * Copyright (c) 2020 Xilinx Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include "qemu/osdep.h" +#include "hw/nvram/xlnx-versal-efuse.h" + +#include "qemu/log.h" +#include "qapi/error.h" +#include "migration/vmstate.h" +#include "hw/qdev-properties.h" + +#ifndef XLNX_VERSAL_EFUSE_CTRL_ERR_DEBUG +#define XLNX_VERSAL_EFUSE_CTRL_ERR_DEBUG 0 +#endif + +REG32(WR_LOCK, 0x0) + FIELD(WR_LOCK, LOCK, 0, 16) +REG32(CFG, 0x4) + FIELD(CFG, SLVERR_ENABLE, 5, 1) + FIELD(CFG, MARGIN_RD, 2, 1) + FIELD(CFG, PGM_EN, 1, 1) +REG32(STATUS, 0x8) + FIELD(STATUS, AES_USER_KEY_1_CRC_PASS, 11, 1) + FIELD(STATUS, AES_USER_KEY_1_CRC_DONE, 10, 1) + FIELD(STATUS, AES_USER_KEY_0_CRC_PASS, 9, 1) + FIELD(STATUS, AES_USER_KEY_0_CRC_DONE, 8, 1) + FIELD(STATUS, AES_CRC_PASS, 7, 1) + FIELD(STATUS, AES_CRC_DONE, 6, 1) + FIELD(STATUS, CACHE_DONE, 5, 1) + FIELD(STATUS, CACHE_LOAD, 4, 1) + FIELD(STATUS, EFUSE_2_TBIT, 2, 1) + FIELD(STATUS, EFUSE_1_TBIT, 1, 1) + FIELD(STATUS, EFUSE_0_TBIT, 0, 1) +REG32(EFUSE_PGM_ADDR, 0xc) + FIELD(EFUSE_PGM_ADDR, PAGE, 13, 4) + FIELD(EFUSE_PGM_ADDR, ROW, 5, 8) + FIELD(EFUSE_PGM_ADDR, COLUMN, 0, 5) +REG32(EFUSE_RD_ADDR, 0x10) + FIELD(EFUSE_RD_ADDR, PAGE, 13, 4) + FIELD(EFUSE_RD_ADDR, ROW, 5, 8) +REG32(EFUSE_RD_DATA, 0x14) +REG32(TPGM, 0x18) + FIELD(TPGM, VALUE, 0, 16) +REG32(TRD, 0x1c) + FIELD(TRD, VALUE, 0, 8) +REG32(TSU_H_PS, 0x20) + FIELD(TSU_H_PS, VALUE, 0, 8) +REG32(TSU_H_PS_CS, 0x24) + FIELD(TSU_H_PS_CS, VALUE, 0, 8) +REG32(TRDM, 0x28) + FIELD(TRDM, VALUE, 0, 8) +REG32(TSU_H_CS, 0x2c) + FIELD(TSU_H_CS, VALUE, 0, 8) +REG32(EFUSE_ISR, 0x30) + FIELD(EFUSE_ISR, APB_SLVERR, 31, 1) + FIELD(EFUSE_ISR, CACHE_PARITY_E2, 14, 1) + FIELD(EFUSE_ISR, CACHE_PARITY_E1, 13, 1) + FIELD(EFUSE_ISR, CACHE_PARITY_E0S, 12, 1) + FIELD(EFUSE_ISR, CACHE_PARITY_E0R, 11, 1) + FIELD(EFUSE_ISR, CACHE_APB_SLVERR, 10, 1) + FIELD(EFUSE_ISR, CACHE_REQ_ERROR, 9, 1) + FIELD(EFUSE_ISR, MAIN_REQ_ERROR, 8, 1) + FIELD(EFUSE_ISR, READ_ON_CACHE_LD, 7, 1) + FIELD(EFUSE_ISR, CACHE_FSM_ERROR, 6, 1) + FIELD(EFUSE_ISR, MAIN_FSM_ERROR, 5, 1) + FIELD(EFUSE_ISR, CACHE_ERROR, 4, 1) + FIELD(EFUSE_ISR, RD_ERROR, 3, 1) + FIELD(EFUSE_ISR, RD_DONE, 2, 1) + FIELD(EFUSE_ISR, PGM_ERROR, 1, 1) + FIELD(EFUSE_ISR, PGM_DONE, 0, 1) +REG32(EFUSE_IMR, 0x34) + FIELD(EFUSE_IMR, APB_SLVERR, 31, 1) + FIELD(EFUSE_IMR, CACHE_PARITY_E2, 14, 1) + FIELD(EFUSE_IMR, CACHE_PARITY_E1, 13, 1) + FIELD(EFUSE_IMR, CACHE_PARITY_E0S, 12, 1) + FIELD(EFUSE_IMR, CACHE_PARITY_E0R, 11, 1) + FIELD(EFUSE_IMR, CACHE_APB_SLVERR, 10, 1) + FIELD(EFUSE_IMR, CACHE_REQ_ERROR, 9, 1) + FIELD(EFUSE_IMR, MAIN_REQ_ERROR, 8, 1) + FIELD(EFUSE_IMR, READ_ON_CACHE_LD, 7, 1) + FIELD(EFUSE_IMR, CACHE_FSM_ERROR, 6, 1) + FIELD(EFUSE_IMR, MAIN_FSM_ERROR, 5, 1) + FIELD(EFUSE_IMR, CACHE_ERROR, 4, 1) + FIELD(EFUSE_IMR, RD_ERROR, 3, 1) + FIELD(EFUSE_IMR, RD_DONE, 2, 1) + FIELD(EFUSE_IMR, PGM_ERROR, 1, 1) + FIELD(EFUSE_IMR, PGM_DONE, 0, 1) +REG32(EFUSE_IER, 0x38) + FIELD(EFUSE_IER, APB_SLVERR, 31, 1) + FIELD(EFUSE_IER, CACHE_PARITY_E2, 14, 1) + FIELD(EFUSE_IER, CACHE_PARITY_E1, 13, 1) + FIELD(EFUSE_IER, CACHE_PARITY_E0S, 12, 1) + FIELD(EFUSE_IER, CACHE_PARITY_E0R, 11, 1) + FIELD(EFUSE_IER, CACHE_APB_SLVERR, 10, 1) + FIELD(EFUSE_IER, CACHE_REQ_ERROR, 9, 1) + FIELD(EFUSE_IER, MAIN_REQ_ERROR, 8, 1) + FIELD(EFUSE_IER, READ_ON_CACHE_LD, 7, 1) + FIELD(EFUSE_IER, CACHE_FSM_ERROR, 6, 1) + FIELD(EFUSE_IER, MAIN_FSM_ERROR, 5, 1) + FIELD(EFUSE_IER, CACHE_ERROR, 4, 1) + FIELD(EFUSE_IER, RD_ERROR, 3, 1) + FIELD(EFUSE_IER, RD_DONE, 2, 1) + FIELD(EFUSE_IER, PGM_ERROR, 1, 1) + FIELD(EFUSE_IER, PGM_DONE, 0, 1) +REG32(EFUSE_IDR, 0x3c) + FIELD(EFUSE_IDR, APB_SLVERR, 31, 1) + FIELD(EFUSE_IDR, 
CACHE_PARITY_E2, 14, 1) + FIELD(EFUSE_IDR, CACHE_PARITY_E1, 13, 1) + FIELD(EFUSE_IDR, CACHE_PARITY_E0S, 12, 1) + FIELD(EFUSE_IDR, CACHE_PARITY_E0R, 11, 1) + FIELD(EFUSE_IDR, CACHE_APB_SLVERR, 10, 1) + FIELD(EFUSE_IDR, CACHE_REQ_ERROR, 9, 1) + FIELD(EFUSE_IDR, MAIN_REQ_ERROR, 8, 1) + FIELD(EFUSE_IDR, READ_ON_CACHE_LD, 7, 1) + FIELD(EFUSE_IDR, CACHE_FSM_ERROR, 6, 1) + FIELD(EFUSE_IDR, MAIN_FSM_ERROR, 5, 1) + FIELD(EFUSE_IDR, CACHE_ERROR, 4, 1) + FIELD(EFUSE_IDR, RD_ERROR, 3, 1) + FIELD(EFUSE_IDR, RD_DONE, 2, 1) + FIELD(EFUSE_IDR, PGM_ERROR, 1, 1) + FIELD(EFUSE_IDR, PGM_DONE, 0, 1) +REG32(EFUSE_CACHE_LOAD, 0x40) + FIELD(EFUSE_CACHE_LOAD, LOAD, 0, 1) +REG32(EFUSE_PGM_LOCK, 0x44) + FIELD(EFUSE_PGM_LOCK, SPK_ID_LOCK, 0, 1) +REG32(EFUSE_AES_CRC, 0x48) +REG32(EFUSE_AES_USR_KEY0_CRC, 0x4c) +REG32(EFUSE_AES_USR_KEY1_CRC, 0x50) +REG32(EFUSE_PD, 0x54) +REG32(EFUSE_ANLG_OSC_SW_1LP, 0x60) +REG32(EFUSE_TEST_CTRL, 0x100) + +#define R_MAX (R_EFUSE_TEST_CTRL + 1) + +#define R_WR_LOCK_UNLOCK_PASSCODE (0xDF0D) + +/* + * eFuse layout references: + * https://github.com/Xilinx/embeddedsw/blob/release-2019.2/lib/sw_services/xilnvm/src/xnvm_efuse_hw.h + */ +#define BIT_POS_OF(A_) \ + ((uint32_t)((A_) & (R_EFUSE_PGM_ADDR_ROW_MASK | \ + R_EFUSE_PGM_ADDR_COLUMN_MASK))) + +#define BIT_POS(R_, C_) \ + ((uint32_t)((R_EFUSE_PGM_ADDR_ROW_MASK \ + & ((R_) << R_EFUSE_PGM_ADDR_ROW_SHIFT)) \ + | \ + (R_EFUSE_PGM_ADDR_COLUMN_MASK \ + & ((C_) << R_EFUSE_PGM_ADDR_COLUMN_SHIFT)))) + +#define EFUSE_TBIT_POS(A_) (BIT_POS_OF(A_) >= BIT_POS(0, 28)) + +#define EFUSE_ANCHOR_ROW (0) +#define EFUSE_ANCHOR_3_COL (27) +#define EFUSE_ANCHOR_1_COL (1) + +#define EFUSE_AES_KEY_START BIT_POS(12, 0) +#define EFUSE_AES_KEY_END BIT_POS(19, 31) +#define EFUSE_USER_KEY_0_START BIT_POS(20, 0) +#define EFUSE_USER_KEY_0_END BIT_POS(27, 31) +#define EFUSE_USER_KEY_1_START BIT_POS(28, 0) +#define EFUSE_USER_KEY_1_END BIT_POS(35, 31) + +#define EFUSE_RD_BLOCKED_START EFUSE_AES_KEY_START +#define EFUSE_RD_BLOCKED_END EFUSE_USER_KEY_1_END + +#define EFUSE_GLITCH_DET_WR_LK BIT_POS(4, 31) +#define EFUSE_PPK0_WR_LK BIT_POS(43, 6) +#define EFUSE_PPK1_WR_LK BIT_POS(43, 7) +#define EFUSE_PPK2_WR_LK BIT_POS(43, 8) +#define EFUSE_AES_WR_LK BIT_POS(43, 11) +#define EFUSE_USER_KEY_0_WR_LK BIT_POS(43, 13) +#define EFUSE_USER_KEY_1_WR_LK BIT_POS(43, 15) +#define EFUSE_PUF_SYN_LK BIT_POS(43, 16) +#define EFUSE_DNA_WR_LK BIT_POS(43, 27) +#define EFUSE_BOOT_ENV_WR_LK BIT_POS(43, 28) + +#define EFUSE_PGM_LOCKED_START BIT_POS(44, 0) +#define EFUSE_PGM_LOCKED_END BIT_POS(51, 31) + +#define EFUSE_PUF_PAGE (2) +#define EFUSE_PUF_SYN_START BIT_POS(129, 0) +#define EFUSE_PUF_SYN_END BIT_POS(255, 27) + +#define EFUSE_KEY_CRC_LK_ROW (43) +#define EFUSE_AES_KEY_CRC_LK_MASK ((1U << 9) | (1U << 10)) +#define EFUSE_USER_KEY_0_CRC_LK_MASK (1U << 12) +#define EFUSE_USER_KEY_1_CRC_LK_MASK (1U << 14) + +/* + * A handy macro to return value of an array element, + * or a specific default if given index is out of bound. + */ +#define ARRAY_GET(A_, I_, D_) \ + ((unsigned int)(I_) < ARRAY_SIZE(A_) ? 
(A_)[I_] : (D_)) + +QEMU_BUILD_BUG_ON(R_MAX != ARRAY_SIZE(((XlnxVersalEFuseCtrl *)0)->regs)); + +typedef struct XlnxEFuseLkSpec { + uint16_t row; + uint16_t lk_bit; +} XlnxEFuseLkSpec; + +static void efuse_imr_update_irq(XlnxVersalEFuseCtrl *s) +{ + bool pending = s->regs[R_EFUSE_ISR] & ~s->regs[R_EFUSE_IMR]; + qemu_set_irq(s->irq_efuse_imr, pending); +} + +static void efuse_isr_postw(RegisterInfo *reg, uint64_t val64) +{ + XlnxVersalEFuseCtrl *s = XLNX_VERSAL_EFUSE_CTRL(reg->opaque); + efuse_imr_update_irq(s); +} + +static uint64_t efuse_ier_prew(RegisterInfo *reg, uint64_t val64) +{ + XlnxVersalEFuseCtrl *s = XLNX_VERSAL_EFUSE_CTRL(reg->opaque); + uint32_t val = val64; + + s->regs[R_EFUSE_IMR] &= ~val; + efuse_imr_update_irq(s); + return 0; +} + +static uint64_t efuse_idr_prew(RegisterInfo *reg, uint64_t val64) +{ + XlnxVersalEFuseCtrl *s = XLNX_VERSAL_EFUSE_CTRL(reg->opaque); + uint32_t val = val64; + + s->regs[R_EFUSE_IMR] |= val; + efuse_imr_update_irq(s); + return 0; +} + +static void efuse_status_tbits_sync(XlnxVersalEFuseCtrl *s) +{ + uint32_t check = xlnx_efuse_tbits_check(s->efuse); + uint32_t val = s->regs[R_STATUS]; + + val = FIELD_DP32(val, STATUS, EFUSE_0_TBIT, !!(check & (1 << 0))); + val = FIELD_DP32(val, STATUS, EFUSE_1_TBIT, !!(check & (1 << 1))); + val = FIELD_DP32(val, STATUS, EFUSE_2_TBIT, !!(check & (1 << 2))); + + s->regs[R_STATUS] = val; +} + +static void efuse_anchor_bits_check(XlnxVersalEFuseCtrl *s) +{ + unsigned page; + + if (!s->efuse || !s->efuse->init_tbits) { + return; + } + + for (page = 0; page < s->efuse->efuse_nr; page++) { + uint32_t row = 0, bit; + + row = FIELD_DP32(row, EFUSE_PGM_ADDR, PAGE, page); + row = FIELD_DP32(row, EFUSE_PGM_ADDR, ROW, EFUSE_ANCHOR_ROW); + + bit = FIELD_DP32(row, EFUSE_PGM_ADDR, COLUMN, EFUSE_ANCHOR_3_COL); + if (!xlnx_efuse_get_bit(s->efuse, bit)) { + xlnx_efuse_set_bit(s->efuse, bit); + } + + bit = FIELD_DP32(row, EFUSE_PGM_ADDR, COLUMN, EFUSE_ANCHOR_1_COL); + if (!xlnx_efuse_get_bit(s->efuse, bit)) { + xlnx_efuse_set_bit(s->efuse, bit); + } + } +} + +static void efuse_key_crc_check(RegisterInfo *reg, uint32_t crc, + uint32_t pass_mask, uint32_t done_mask, + unsigned first, uint32_t lk_mask) +{ + XlnxVersalEFuseCtrl *s = XLNX_VERSAL_EFUSE_CTRL(reg->opaque); + uint32_t r, lk_bits; + + /* + * To start, assume both DONE and PASS, and clear PASS by xor + * if CRC-check fails or CRC-check disabled by lock fuse. 
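A note on the interrupt plumbing declared above: EFUSE_IMR is a mask of disabled sources, a write to EFUSE_IER clears mask bits (enabling those sources), a write to EFUSE_IDR sets them, EFUSE_ISR is write-one-to-clear, and the output line is asserted while ISR & ~IMR is non-zero. A minimal, self-contained sketch of that logic in plain C (mirroring efuse_imr_update_irq(), efuse_ier_prew() and efuse_idr_prew(); not QEMU code):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t isr;                  /* EFUSE_ISR: latched events          */
    static uint32_t imr = 0x80007fff;     /* EFUSE_IMR reset value: all masked  */

    static bool irq_pending(void) { return (isr & ~imr) != 0; }

    int main(void)
    {
        uint32_t pgm_done = 1u << 0;      /* EFUSE_ISR.PGM_DONE */

        imr &= ~pgm_done;                 /* guest writes PGM_DONE to EFUSE_IER */
        isr |= pgm_done;                  /* model completes a program cycle    */
        printf("pending=%d\n", irq_pending());  /* 1: interrupt line asserted   */

        isr &= ~pgm_done;                 /* guest acks via W1C write to ISR    */
        printf("pending=%d\n", irq_pending());  /* 0 */
        return 0;
    }
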
+ */ + r = s->regs[R_STATUS] | done_mask | pass_mask; + + lk_bits = xlnx_efuse_get_row(s->efuse, EFUSE_KEY_CRC_LK_ROW) & lk_mask; + if (lk_bits == 0 && xlnx_efuse_k256_check(s->efuse, crc, first)) { + pass_mask = 0; + } + + s->regs[R_STATUS] = r ^ pass_mask; +} + +static void efuse_data_sync(XlnxVersalEFuseCtrl *s) +{ + efuse_status_tbits_sync(s); +} + +static int efuse_lk_spec_cmp(const void *a, const void *b) +{ + uint16_t r1 = ((const XlnxEFuseLkSpec *)a)->row; + uint16_t r2 = ((const XlnxEFuseLkSpec *)b)->row; + + return (r1 > r2) - (r1 < r2); +} + +static void efuse_lk_spec_sort(XlnxVersalEFuseCtrl *s) +{ + XlnxEFuseLkSpec *ary = s->extra_pg0_lock_spec; + const uint32_t n8 = s->extra_pg0_lock_n16 * 2; + const uint32_t sz = sizeof(ary[0]); + const uint32_t cnt = n8 / sz; + + if (ary && cnt) { + qsort(ary, cnt, sz, efuse_lk_spec_cmp); + } +} + +static uint32_t efuse_lk_spec_find(XlnxVersalEFuseCtrl *s, uint32_t row) +{ + const XlnxEFuseLkSpec *ary = s->extra_pg0_lock_spec; + const uint32_t n8 = s->extra_pg0_lock_n16 * 2; + const uint32_t sz = sizeof(ary[0]); + const uint32_t cnt = n8 / sz; + const XlnxEFuseLkSpec *item = NULL; + + if (ary && cnt) { + XlnxEFuseLkSpec k = { .row = row, }; + + item = bsearch(&k, ary, cnt, sz, efuse_lk_spec_cmp); + } + + return item ? item->lk_bit : 0; +} + +static uint32_t efuse_bit_locked(XlnxVersalEFuseCtrl *s, uint32_t bit) +{ + /* Hard-coded locks */ + static const uint16_t pg0_hard_lock[] = { + [4] = EFUSE_GLITCH_DET_WR_LK, + [37] = EFUSE_BOOT_ENV_WR_LK, + + [8 ... 11] = EFUSE_DNA_WR_LK, + [12 ... 19] = EFUSE_AES_WR_LK, + [20 ... 27] = EFUSE_USER_KEY_0_WR_LK, + [28 ... 35] = EFUSE_USER_KEY_1_WR_LK, + [64 ... 71] = EFUSE_PPK0_WR_LK, + [72 ... 79] = EFUSE_PPK1_WR_LK, + [80 ... 87] = EFUSE_PPK2_WR_LK, + }; + + uint32_t row = FIELD_EX32(bit, EFUSE_PGM_ADDR, ROW); + uint32_t lk_bit = ARRAY_GET(pg0_hard_lock, row, 0); + + return lk_bit ? lk_bit : efuse_lk_spec_find(s, row); +} + +static bool efuse_pgm_locked(XlnxVersalEFuseCtrl *s, unsigned int bit) +{ + + unsigned int lock = 1; + + /* Global lock */ + if (!ARRAY_FIELD_EX32(s->regs, CFG, PGM_EN)) { + goto ret_lock; + } + + /* Row lock */ + switch (FIELD_EX32(bit, EFUSE_PGM_ADDR, PAGE)) { + case 0: + if (ARRAY_FIELD_EX32(s->regs, EFUSE_PGM_LOCK, SPK_ID_LOCK) && + bit >= EFUSE_PGM_LOCKED_START && bit <= EFUSE_PGM_LOCKED_END) { + goto ret_lock; + } + + lock = efuse_bit_locked(s, bit); + break; + case EFUSE_PUF_PAGE: + if (bit < EFUSE_PUF_SYN_START || bit > EFUSE_PUF_SYN_END) { + lock = 0; + goto ret_lock; + } + + lock = EFUSE_PUF_SYN_LK; + break; + default: + lock = 0; + goto ret_lock; + } + + /* Row lock by an efuse bit */ + if (lock) { + lock = xlnx_efuse_get_bit(s->efuse, lock); + } + + ret_lock: + return lock != 0; +} + +static void efuse_pgm_addr_postw(RegisterInfo *reg, uint64_t val64) +{ + XlnxVersalEFuseCtrl *s = XLNX_VERSAL_EFUSE_CTRL(reg->opaque); + unsigned bit = val64; + bool ok = false; + + /* Always zero out PGM_ADDR because it is write-only */ + s->regs[R_EFUSE_PGM_ADDR] = 0; + + /* + * Indicate error if bit is write-protected (or read-only + * as guarded by efuse_set_bit()). + * + * Keep it simple by not modeling program timing. + * + * Note: model must NEVER clear the PGM_ERROR bit; it is + * up to guest to do so (or by reset). 
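For reference, EFUSE_PGM_ADDR packs the fuse coordinates as COLUMN in bits [4:0], ROW in bits [12:5] and PAGE in bits [16:13], and the BIT_POS()/BIT_POS_OF() helpers above build the same row/column encoding that the lock tables use. A tiny self-contained check of that arithmetic for the first AES key fuse (page 0, row 12, column 0, i.e. EFUSE_AES_KEY_START):

    #include <stdint.h>
    #include <stdio.h>

    /* Same packing as the EFUSE_PGM_ADDR fields declared earlier. */
    static uint32_t pgm_addr(uint32_t page, uint32_t row, uint32_t column)
    {
        return (page << 13) | (row << 5) | column;
    }

    int main(void)
    {
        uint32_t a = pgm_addr(0, 12, 0);   /* EFUSE_AES_KEY_START == BIT_POS(12, 0) */

        printf("addr=0x%x row=%u col=%u\n",
               a, (a >> 5) & 0xff, a & 0x1f);   /* addr=0x180 row=12 col=0 */
        return 0;
    }
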
+ */ + if (efuse_pgm_locked(s, bit)) { + g_autofree char *path = object_get_canonical_path(OBJECT(s)); + + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Denied setting of efuse<%u, %u, %u>\n", + path, + FIELD_EX32(bit, EFUSE_PGM_ADDR, PAGE), + FIELD_EX32(bit, EFUSE_PGM_ADDR, ROW), + FIELD_EX32(bit, EFUSE_PGM_ADDR, COLUMN)); + } else if (xlnx_efuse_set_bit(s->efuse, bit)) { + ok = true; + if (EFUSE_TBIT_POS(bit)) { + efuse_status_tbits_sync(s); + } + } + + if (!ok) { + ARRAY_FIELD_DP32(s->regs, EFUSE_ISR, PGM_ERROR, 1); + } + + ARRAY_FIELD_DP32(s->regs, EFUSE_ISR, PGM_DONE, 1); + efuse_imr_update_irq(s); +} + +static void efuse_rd_addr_postw(RegisterInfo *reg, uint64_t val64) +{ + XlnxVersalEFuseCtrl *s = XLNX_VERSAL_EFUSE_CTRL(reg->opaque); + unsigned bit = val64; + bool denied; + + /* Always zero out RD_ADDR because it is write-only */ + s->regs[R_EFUSE_RD_ADDR] = 0; + + /* + * Indicate error if row is read-blocked. + * + * Note: model must NEVER clear the RD_ERROR bit; it is + * up to guest to do so (or by reset). + */ + s->regs[R_EFUSE_RD_DATA] = xlnx_versal_efuse_read_row(s->efuse, + bit, &denied); + if (denied) { + g_autofree char *path = object_get_canonical_path(OBJECT(s)); + + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Denied reading of efuse<%u, %u>\n", + path, + FIELD_EX32(bit, EFUSE_RD_ADDR, PAGE), + FIELD_EX32(bit, EFUSE_RD_ADDR, ROW)); + + ARRAY_FIELD_DP32(s->regs, EFUSE_ISR, RD_ERROR, 1); + } + + ARRAY_FIELD_DP32(s->regs, EFUSE_ISR, RD_DONE, 1); + efuse_imr_update_irq(s); + return; +} + +static uint64_t efuse_cache_load_prew(RegisterInfo *reg, uint64_t val64) +{ + XlnxVersalEFuseCtrl *s = XLNX_VERSAL_EFUSE_CTRL(reg->opaque); + + if (val64 & R_EFUSE_CACHE_LOAD_LOAD_MASK) { + efuse_data_sync(s); + + ARRAY_FIELD_DP32(s->regs, STATUS, CACHE_DONE, 1); + efuse_imr_update_irq(s); + } + + return 0; +} + +static uint64_t efuse_pgm_lock_prew(RegisterInfo *reg, uint64_t val64) +{ + XlnxVersalEFuseCtrl *s = XLNX_VERSAL_EFUSE_CTRL(reg->opaque); + + /* Ignore all other bits */ + val64 = FIELD_EX32(val64, EFUSE_PGM_LOCK, SPK_ID_LOCK); + + /* Once the bit is written 1, only reset will clear it to 0 */ + val64 |= ARRAY_FIELD_EX32(s->regs, EFUSE_PGM_LOCK, SPK_ID_LOCK); + + return val64; +} + +static void efuse_aes_crc_postw(RegisterInfo *reg, uint64_t val64) +{ + efuse_key_crc_check(reg, val64, + R_STATUS_AES_CRC_PASS_MASK, + R_STATUS_AES_CRC_DONE_MASK, + EFUSE_AES_KEY_START, + EFUSE_AES_KEY_CRC_LK_MASK); +} + +static void efuse_aes_u0_crc_postw(RegisterInfo *reg, uint64_t val64) +{ + efuse_key_crc_check(reg, val64, + R_STATUS_AES_USER_KEY_0_CRC_PASS_MASK, + R_STATUS_AES_USER_KEY_0_CRC_DONE_MASK, + EFUSE_USER_KEY_0_START, + EFUSE_USER_KEY_0_CRC_LK_MASK); +} + +static void efuse_aes_u1_crc_postw(RegisterInfo *reg, uint64_t val64) +{ + efuse_key_crc_check(reg, val64, + R_STATUS_AES_USER_KEY_1_CRC_PASS_MASK, + R_STATUS_AES_USER_KEY_1_CRC_DONE_MASK, + EFUSE_USER_KEY_1_START, + EFUSE_USER_KEY_1_CRC_LK_MASK); +} + +static uint64_t efuse_wr_lock_prew(RegisterInfo *reg, uint64_t val) +{ + return val != R_WR_LOCK_UNLOCK_PASSCODE; +} + +static const RegisterAccessInfo efuse_ctrl_regs_info[] = { + { .name = "WR_LOCK", .addr = A_WR_LOCK, + .reset = 0x1, + .pre_write = efuse_wr_lock_prew, + },{ .name = "CFG", .addr = A_CFG, + .rsvd = 0x9, + },{ .name = "STATUS", .addr = A_STATUS, + .rsvd = 0x8, + .ro = 0xfff, + },{ .name = "EFUSE_PGM_ADDR", .addr = A_EFUSE_PGM_ADDR, + .post_write = efuse_pgm_addr_postw, + },{ .name = "EFUSE_RD_ADDR", .addr = A_EFUSE_RD_ADDR, + .rsvd = 0x1f, + .post_write = efuse_rd_addr_postw, + },{ 
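The CRC handlers above give a guest a way to verify a stored key without being able to read it back: unlock the register block by writing the 0xDF0D passcode to WR_LOCK, write the expected 256-bit-key CRC to EFUSE_AES_CRC (or one of the USR_KEY variants), then poll STATUS for the matching DONE/PASS bits. A guest-side sketch of that sequence; EFUSE_CTRL_BASE and the volatile-pointer MMIO access are illustrative assumptions, while the offsets and bit positions are the ones declared above:

    #include <stdbool.h>
    #include <stdint.h>

    #define EFUSE_CTRL_BASE  0xf1240000u   /* placeholder base address (assumption) */
    #define REG32_AT(off)    (*(volatile uint32_t *)(uintptr_t)(EFUSE_CTRL_BASE + (off)))

    #define WR_LOCK          0x00u         /* offsets/bits as declared in the model */
    #define STATUS           0x08u
    #define EFUSE_AES_CRC    0x48u
    #define AES_CRC_DONE     (1u << 6)
    #define AES_CRC_PASS     (1u << 7)

    static bool aes_key_crc_ok(uint32_t expected_crc)
    {
        REG32_AT(WR_LOCK) = 0xDF0D;              /* unlock register writes       */
        REG32_AT(EFUSE_AES_CRC) = expected_crc;  /* kicks off the CRC comparison */

        while (!(REG32_AT(STATUS) & AES_CRC_DONE)) {
            /* busy-wait; in this model the result is available immediately */
        }
        return (REG32_AT(STATUS) & AES_CRC_PASS) != 0;
    }
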
.name = "EFUSE_RD_DATA", .addr = A_EFUSE_RD_DATA, + .ro = 0xffffffff, + },{ .name = "TPGM", .addr = A_TPGM, + },{ .name = "TRD", .addr = A_TRD, + .reset = 0x19, + },{ .name = "TSU_H_PS", .addr = A_TSU_H_PS, + .reset = 0xff, + },{ .name = "TSU_H_PS_CS", .addr = A_TSU_H_PS_CS, + .reset = 0x11, + },{ .name = "TRDM", .addr = A_TRDM, + .reset = 0x3a, + },{ .name = "TSU_H_CS", .addr = A_TSU_H_CS, + .reset = 0x16, + },{ .name = "EFUSE_ISR", .addr = A_EFUSE_ISR, + .rsvd = 0x7fff8000, + .w1c = 0x80007fff, + .post_write = efuse_isr_postw, + },{ .name = "EFUSE_IMR", .addr = A_EFUSE_IMR, + .reset = 0x80007fff, + .rsvd = 0x7fff8000, + .ro = 0xffffffff, + },{ .name = "EFUSE_IER", .addr = A_EFUSE_IER, + .rsvd = 0x7fff8000, + .pre_write = efuse_ier_prew, + },{ .name = "EFUSE_IDR", .addr = A_EFUSE_IDR, + .rsvd = 0x7fff8000, + .pre_write = efuse_idr_prew, + },{ .name = "EFUSE_CACHE_LOAD", .addr = A_EFUSE_CACHE_LOAD, + .pre_write = efuse_cache_load_prew, + },{ .name = "EFUSE_PGM_LOCK", .addr = A_EFUSE_PGM_LOCK, + .pre_write = efuse_pgm_lock_prew, + },{ .name = "EFUSE_AES_CRC", .addr = A_EFUSE_AES_CRC, + .post_write = efuse_aes_crc_postw, + },{ .name = "EFUSE_AES_USR_KEY0_CRC", .addr = A_EFUSE_AES_USR_KEY0_CRC, + .post_write = efuse_aes_u0_crc_postw, + },{ .name = "EFUSE_AES_USR_KEY1_CRC", .addr = A_EFUSE_AES_USR_KEY1_CRC, + .post_write = efuse_aes_u1_crc_postw, + },{ .name = "EFUSE_PD", .addr = A_EFUSE_PD, + .ro = 0xfffffffe, + },{ .name = "EFUSE_ANLG_OSC_SW_1LP", .addr = A_EFUSE_ANLG_OSC_SW_1LP, + },{ .name = "EFUSE_TEST_CTRL", .addr = A_EFUSE_TEST_CTRL, + .reset = 0x8, + } +}; + +static void efuse_ctrl_reg_write(void *opaque, hwaddr addr, + uint64_t data, unsigned size) +{ + RegisterInfoArray *reg_array = opaque; + XlnxVersalEFuseCtrl *s; + Object *dev; + + assert(reg_array != NULL); + + dev = reg_array->mem.owner; + assert(dev); + + s = XLNX_VERSAL_EFUSE_CTRL(dev); + + if (addr != A_WR_LOCK && s->regs[R_WR_LOCK]) { + g_autofree char *path = object_get_canonical_path(OBJECT(s)); + + qemu_log_mask(LOG_GUEST_ERROR, + "%s[reg_0x%02lx]: Attempt to write locked register.\n", + path, (long)addr); + } else { + register_write_memory(opaque, addr, data, size); + } +} + +static void efuse_ctrl_register_reset(RegisterInfo *reg) +{ + if (!reg->data || !reg->access) { + return; + } + + /* Reset must not trigger some registers' writers */ + switch (reg->access->addr) { + case A_EFUSE_AES_CRC: + case A_EFUSE_AES_USR_KEY0_CRC: + case A_EFUSE_AES_USR_KEY1_CRC: + *(uint32_t *)reg->data = reg->access->reset; + return; + } + + register_reset(reg); +} + +static void efuse_ctrl_reset(DeviceState *dev) +{ + XlnxVersalEFuseCtrl *s = XLNX_VERSAL_EFUSE_CTRL(dev); + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(s->regs_info); ++i) { + efuse_ctrl_register_reset(&s->regs_info[i]); + } + + efuse_anchor_bits_check(s); + efuse_data_sync(s); + efuse_imr_update_irq(s); +} + +static const MemoryRegionOps efuse_ctrl_ops = { + .read = register_read_memory, + .write = efuse_ctrl_reg_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + }, +}; + +static void efuse_ctrl_realize(DeviceState *dev, Error **errp) +{ + XlnxVersalEFuseCtrl *s = XLNX_VERSAL_EFUSE_CTRL(dev); + const uint32_t lks_sz = sizeof(XlnxEFuseLkSpec) / 2; + + if (!s->efuse) { + g_autofree char *path = object_get_canonical_path(OBJECT(s)); + + error_setg(errp, "%s.efuse: link property not connected to XLNX-EFUSE", + path); + return; + } + + /* Sort property-defined pgm-locks for bsearch lookup */ + if ((s->extra_pg0_lock_n16 % 
lks_sz) != 0) { + g_autofree char *path = object_get_canonical_path(OBJECT(s)); + + error_setg(errp, + "%s.pg0-lock: array property item-count not multiple of %u", + path, lks_sz); + return; + } + + efuse_lk_spec_sort(s); +} + +static void efuse_ctrl_init(Object *obj) +{ + XlnxVersalEFuseCtrl *s = XLNX_VERSAL_EFUSE_CTRL(obj); + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + RegisterInfoArray *reg_array; + + reg_array = + register_init_block32(DEVICE(obj), efuse_ctrl_regs_info, + ARRAY_SIZE(efuse_ctrl_regs_info), + s->regs_info, s->regs, + &efuse_ctrl_ops, + XLNX_VERSAL_EFUSE_CTRL_ERR_DEBUG, + R_MAX * 4); + + sysbus_init_mmio(sbd, &reg_array->mem); + sysbus_init_irq(sbd, &s->irq_efuse_imr); +} + +static const VMStateDescription vmstate_efuse_ctrl = { + .name = TYPE_XLNX_VERSAL_EFUSE_CTRL, + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32_ARRAY(regs, XlnxVersalEFuseCtrl, R_MAX), + VMSTATE_END_OF_LIST(), + } +}; + +static Property efuse_ctrl_props[] = { + DEFINE_PROP_LINK("efuse", + XlnxVersalEFuseCtrl, efuse, + TYPE_XLNX_EFUSE, XlnxEFuse *), + DEFINE_PROP_ARRAY("pg0-lock", + XlnxVersalEFuseCtrl, extra_pg0_lock_n16, + extra_pg0_lock_spec, qdev_prop_uint16, uint16_t), + + DEFINE_PROP_END_OF_LIST(), +}; + +static void efuse_ctrl_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->reset = efuse_ctrl_reset; + dc->realize = efuse_ctrl_realize; + dc->vmsd = &vmstate_efuse_ctrl; + device_class_set_props(dc, efuse_ctrl_props); +} + +static const TypeInfo efuse_ctrl_info = { + .name = TYPE_XLNX_VERSAL_EFUSE_CTRL, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(XlnxVersalEFuseCtrl), + .class_init = efuse_ctrl_class_init, + .instance_init = efuse_ctrl_init, +}; + +static void efuse_ctrl_register_types(void) +{ + type_register_static(&efuse_ctrl_info); +} + +type_init(efuse_ctrl_register_types) + +/* + * Retrieve a row, with unreadable bits returned as 0. + */ +uint32_t xlnx_versal_efuse_read_row(XlnxEFuse *efuse, + uint32_t bit, bool *denied) +{ + bool dummy; + + if (!denied) { + denied = &dummy; + } + + if (bit >= EFUSE_RD_BLOCKED_START && bit <= EFUSE_RD_BLOCKED_END) { + *denied = true; + return 0; + } + + *denied = false; + return xlnx_efuse_get_row(efuse, bit); +} diff --git a/hw/nvram/xlnx-zynqmp-efuse.c b/hw/nvram/xlnx-zynqmp-efuse.c new file mode 100644 index 0000000000..228ba0bbfa --- /dev/null +++ b/hw/nvram/xlnx-zynqmp-efuse.c @@ -0,0 +1,861 @@ +/* + * QEMU model of the ZynqMP eFuse + * + * Copyright (c) 2015 Xilinx Inc. + * + * Written by Edgar E. Iglesias + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include "qemu/osdep.h" +#include "hw/nvram/xlnx-zynqmp-efuse.h" + +#include "qemu/log.h" +#include "qapi/error.h" +#include "migration/vmstate.h" +#include "hw/qdev-properties.h" + +#ifndef ZYNQMP_EFUSE_ERR_DEBUG +#define ZYNQMP_EFUSE_ERR_DEBUG 0 +#endif + +REG32(WR_LOCK, 0x0) + FIELD(WR_LOCK, LOCK, 0, 16) +REG32(CFG, 0x4) + FIELD(CFG, SLVERR_ENABLE, 5, 1) + FIELD(CFG, MARGIN_RD, 2, 2) + FIELD(CFG, PGM_EN, 1, 1) + FIELD(CFG, EFUSE_CLK_SEL, 0, 1) +REG32(STATUS, 0x8) + FIELD(STATUS, AES_CRC_PASS, 7, 1) + FIELD(STATUS, AES_CRC_DONE, 6, 1) + FIELD(STATUS, CACHE_DONE, 5, 1) + FIELD(STATUS, CACHE_LOAD, 4, 1) + FIELD(STATUS, EFUSE_3_TBIT, 2, 1) + FIELD(STATUS, EFUSE_2_TBIT, 1, 1) + FIELD(STATUS, EFUSE_0_TBIT, 0, 1) +REG32(EFUSE_PGM_ADDR, 0xc) + FIELD(EFUSE_PGM_ADDR, EFUSE, 11, 2) + FIELD(EFUSE_PGM_ADDR, ROW, 5, 6) + FIELD(EFUSE_PGM_ADDR, COLUMN, 0, 5) +REG32(EFUSE_RD_ADDR, 0x10) + FIELD(EFUSE_RD_ADDR, EFUSE, 11, 2) + FIELD(EFUSE_RD_ADDR, ROW, 5, 6) +REG32(EFUSE_RD_DATA, 0x14) +REG32(TPGM, 0x18) + FIELD(TPGM, VALUE, 0, 16) +REG32(TRD, 0x1c) + FIELD(TRD, VALUE, 0, 8) +REG32(TSU_H_PS, 0x20) + FIELD(TSU_H_PS, VALUE, 0, 8) +REG32(TSU_H_PS_CS, 0x24) + FIELD(TSU_H_PS_CS, VALUE, 0, 8) +REG32(TSU_H_CS, 0x2c) + FIELD(TSU_H_CS, VALUE, 0, 4) +REG32(EFUSE_ISR, 0x30) + FIELD(EFUSE_ISR, APB_SLVERR, 31, 1) + FIELD(EFUSE_ISR, CACHE_ERROR, 4, 1) + FIELD(EFUSE_ISR, RD_ERROR, 3, 1) + FIELD(EFUSE_ISR, RD_DONE, 2, 1) + FIELD(EFUSE_ISR, PGM_ERROR, 1, 1) + FIELD(EFUSE_ISR, PGM_DONE, 0, 1) +REG32(EFUSE_IMR, 0x34) + FIELD(EFUSE_IMR, APB_SLVERR, 31, 1) + FIELD(EFUSE_IMR, CACHE_ERROR, 4, 1) + FIELD(EFUSE_IMR, RD_ERROR, 3, 1) + FIELD(EFUSE_IMR, RD_DONE, 2, 1) + FIELD(EFUSE_IMR, PGM_ERROR, 1, 1) + FIELD(EFUSE_IMR, PGM_DONE, 0, 1) +REG32(EFUSE_IER, 0x38) + FIELD(EFUSE_IER, APB_SLVERR, 31, 1) + FIELD(EFUSE_IER, CACHE_ERROR, 4, 1) + FIELD(EFUSE_IER, RD_ERROR, 3, 1) + FIELD(EFUSE_IER, RD_DONE, 2, 1) + FIELD(EFUSE_IER, PGM_ERROR, 1, 1) + FIELD(EFUSE_IER, PGM_DONE, 0, 1) +REG32(EFUSE_IDR, 0x3c) + FIELD(EFUSE_IDR, APB_SLVERR, 31, 1) + FIELD(EFUSE_IDR, CACHE_ERROR, 4, 1) + FIELD(EFUSE_IDR, RD_ERROR, 3, 1) + FIELD(EFUSE_IDR, RD_DONE, 2, 1) + FIELD(EFUSE_IDR, PGM_ERROR, 1, 1) + FIELD(EFUSE_IDR, PGM_DONE, 0, 1) +REG32(EFUSE_CACHE_LOAD, 0x40) + FIELD(EFUSE_CACHE_LOAD, LOAD, 0, 1) +REG32(EFUSE_PGM_LOCK, 0x44) + FIELD(EFUSE_PGM_LOCK, SPK_ID_LOCK, 0, 1) +REG32(EFUSE_AES_CRC, 0x48) +REG32(EFUSE_TBITS_PRGRMG_EN, 0x100) + FIELD(EFUSE_TBITS_PRGRMG_EN, TBITS_PRGRMG_EN, 3, 1) +REG32(DNA_0, 0x100c) +REG32(DNA_1, 0x1010) +REG32(DNA_2, 0x1014) +REG32(IPDISABLE, 0x1018) + FIELD(IPDISABLE, VCU_DIS, 8, 1) + FIELD(IPDISABLE, GPU_DIS, 5, 1) + FIELD(IPDISABLE, APU3_DIS, 3, 1) + FIELD(IPDISABLE, APU2_DIS, 2, 1) + FIELD(IPDISABLE, APU1_DIS, 1, 1) + FIELD(IPDISABLE, APU0_DIS, 0, 1) +REG32(SYSOSC_CTRL, 0x101c) + FIELD(SYSOSC_CTRL, SYSOSC_EN, 0, 1) +REG32(USER_0, 0x1020) +REG32(USER_1, 0x1024) +REG32(USER_2, 0x1028) +REG32(USER_3, 0x102c) +REG32(USER_4, 0x1030) +REG32(USER_5, 0x1034) +REG32(USER_6, 0x1038) +REG32(USER_7, 0x103c) +REG32(MISC_USER_CTRL, 0x1040) + FIELD(MISC_USER_CTRL, FPD_SC_EN_0, 14, 1) + FIELD(MISC_USER_CTRL, LPD_SC_EN_0, 11, 1) + FIELD(MISC_USER_CTRL, LBIST_EN, 10, 1) + FIELD(MISC_USER_CTRL, USR_WRLK_7, 7, 1) + FIELD(MISC_USER_CTRL, USR_WRLK_6, 6, 1) + 
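The REG32()/FIELD() declarations used throughout both eFuse models come from hw/registerfields.h: REG32(NAME, offset) defines A_NAME (the byte offset) and R_NAME (the index into s->regs[]), while FIELD(REG, NAME, shift, length) defines the _SHIFT/_LENGTH/_MASK constants that FIELD_EX32(), FIELD_DP32() and the ARRAY_FIELD_* wrappers operate on. Roughly what the CFG.PGM_EN declaration expands to, shown as a sketch rather than the literal header text:

    #include <stdint.h>

    #define A_CFG               0x4          /* byte offset of the register */
    #define R_CFG               (0x4 / 4)    /* word index into s->regs[]   */
    #define R_CFG_PGM_EN_SHIFT  1
    #define R_CFG_PGM_EN_LENGTH 1
    #define R_CFG_PGM_EN_MASK   (((1u << R_CFG_PGM_EN_LENGTH) - 1) << R_CFG_PGM_EN_SHIFT)

    /* FIELD_EX32(val, CFG, PGM_EN) then reduces to a mask-and-shift: */
    static inline uint32_t cfg_pgm_en(uint32_t cfg)
    {
        return (cfg & R_CFG_PGM_EN_MASK) >> R_CFG_PGM_EN_SHIFT;
    }
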
FIELD(MISC_USER_CTRL, USR_WRLK_5, 5, 1) + FIELD(MISC_USER_CTRL, USR_WRLK_4, 4, 1) + FIELD(MISC_USER_CTRL, USR_WRLK_3, 3, 1) + FIELD(MISC_USER_CTRL, USR_WRLK_2, 2, 1) + FIELD(MISC_USER_CTRL, USR_WRLK_1, 1, 1) + FIELD(MISC_USER_CTRL, USR_WRLK_0, 0, 1) +REG32(ROM_RSVD, 0x1044) + FIELD(ROM_RSVD, PBR_BOOT_ERROR, 0, 3) +REG32(PUF_CHASH, 0x1050) +REG32(PUF_MISC, 0x1054) + FIELD(PUF_MISC, REGISTER_DIS, 31, 1) + FIELD(PUF_MISC, SYN_WRLK, 30, 1) + FIELD(PUF_MISC, SYN_INVLD, 29, 1) + FIELD(PUF_MISC, TEST2_DIS, 28, 1) + FIELD(PUF_MISC, UNUSED27, 27, 1) + FIELD(PUF_MISC, UNUSED26, 26, 1) + FIELD(PUF_MISC, UNUSED25, 25, 1) + FIELD(PUF_MISC, UNUSED24, 24, 1) + FIELD(PUF_MISC, AUX, 0, 24) +REG32(SEC_CTRL, 0x1058) + FIELD(SEC_CTRL, PPK1_INVLD, 30, 2) + FIELD(SEC_CTRL, PPK1_WRLK, 29, 1) + FIELD(SEC_CTRL, PPK0_INVLD, 27, 2) + FIELD(SEC_CTRL, PPK0_WRLK, 26, 1) + FIELD(SEC_CTRL, RSA_EN, 11, 15) + FIELD(SEC_CTRL, SEC_LOCK, 10, 1) + FIELD(SEC_CTRL, PROG_GATE_2, 9, 1) + FIELD(SEC_CTRL, PROG_GATE_1, 8, 1) + FIELD(SEC_CTRL, PROG_GATE_0, 7, 1) + FIELD(SEC_CTRL, DFT_DIS, 6, 1) + FIELD(SEC_CTRL, JTAG_DIS, 5, 1) + FIELD(SEC_CTRL, ERROR_DIS, 4, 1) + FIELD(SEC_CTRL, BBRAM_DIS, 3, 1) + FIELD(SEC_CTRL, ENC_ONLY, 2, 1) + FIELD(SEC_CTRL, AES_WRLK, 1, 1) + FIELD(SEC_CTRL, AES_RDLK, 0, 1) +REG32(SPK_ID, 0x105c) +REG32(PPK0_0, 0x10a0) +REG32(PPK0_1, 0x10a4) +REG32(PPK0_2, 0x10a8) +REG32(PPK0_3, 0x10ac) +REG32(PPK0_4, 0x10b0) +REG32(PPK0_5, 0x10b4) +REG32(PPK0_6, 0x10b8) +REG32(PPK0_7, 0x10bc) +REG32(PPK0_8, 0x10c0) +REG32(PPK0_9, 0x10c4) +REG32(PPK0_10, 0x10c8) +REG32(PPK0_11, 0x10cc) +REG32(PPK1_0, 0x10d0) +REG32(PPK1_1, 0x10d4) +REG32(PPK1_2, 0x10d8) +REG32(PPK1_3, 0x10dc) +REG32(PPK1_4, 0x10e0) +REG32(PPK1_5, 0x10e4) +REG32(PPK1_6, 0x10e8) +REG32(PPK1_7, 0x10ec) +REG32(PPK1_8, 0x10f0) +REG32(PPK1_9, 0x10f4) +REG32(PPK1_10, 0x10f8) +REG32(PPK1_11, 0x10fc) + +#define BIT_POS(ROW, COLUMN) (ROW * 32 + COLUMN) +#define R_MAX (R_PPK1_11 + 1) + +/* #define EFUSE_XOSC 26 */ + +/* + * eFUSE layout references: + * ZynqMP: UG1085 (v2.1) August 21, 2019, p.277, Table 12-13 + */ +#define EFUSE_AES_RDLK BIT_POS(22, 0) +#define EFUSE_AES_WRLK BIT_POS(22, 1) +#define EFUSE_ENC_ONLY BIT_POS(22, 2) +#define EFUSE_BBRAM_DIS BIT_POS(22, 3) +#define EFUSE_ERROR_DIS BIT_POS(22, 4) +#define EFUSE_JTAG_DIS BIT_POS(22, 5) +#define EFUSE_DFT_DIS BIT_POS(22, 6) +#define EFUSE_PROG_GATE_0 BIT_POS(22, 7) +#define EFUSE_PROG_GATE_1 BIT_POS(22, 7) +#define EFUSE_PROG_GATE_2 BIT_POS(22, 9) +#define EFUSE_SEC_LOCK BIT_POS(22, 10) +#define EFUSE_RSA_EN BIT_POS(22, 11) +#define EFUSE_RSA_EN14 BIT_POS(22, 25) +#define EFUSE_PPK0_WRLK BIT_POS(22, 26) +#define EFUSE_PPK0_INVLD BIT_POS(22, 27) +#define EFUSE_PPK0_INVLD_1 BIT_POS(22, 28) +#define EFUSE_PPK1_WRLK BIT_POS(22, 29) +#define EFUSE_PPK1_INVLD BIT_POS(22, 30) +#define EFUSE_PPK1_INVLD_1 BIT_POS(22, 31) + +/* Areas. 
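With the linear BIT_POS(row, column) = row * 32 + column numbering above, every control bit in UG1085 table 12-13 maps to a single fuse index, and the EFUSE_CACHE_BIT()/EFUSE_CACHE_FLD() helpers defined a little further down simply read such an index back into the matching field of the cached register. A two-assert sanity check of the arithmetic (plain C):

    #include <assert.h>

    #define BIT_POS(row, col)  ((row) * 32 + (col))

    int main(void)
    {
        assert(BIT_POS(22, 1) == 705);   /* EFUSE_AES_WRLK: SEC_CTRL row 22, bit 1    */
        assert(BIT_POS(24, 0) == 768);   /* EFUSE_AES_START in the area defines below */
        return 0;
    }
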
*/ +#define EFUSE_TRIM_START BIT_POS(1, 0) +#define EFUSE_TRIM_END BIT_POS(1, 30) +#define EFUSE_DNA_START BIT_POS(3, 0) +#define EFUSE_DNA_END BIT_POS(5, 31) +#define EFUSE_AES_START BIT_POS(24, 0) +#define EFUSE_AES_END BIT_POS(31, 31) +#define EFUSE_ROM_START BIT_POS(17, 0) +#define EFUSE_ROM_END BIT_POS(17, 31) +#define EFUSE_IPDIS_START BIT_POS(6, 0) +#define EFUSE_IPDIS_END BIT_POS(6, 31) +#define EFUSE_USER_START BIT_POS(8, 0) +#define EFUSE_USER_END BIT_POS(15, 31) +#define EFUSE_BISR_START BIT_POS(32, 0) +#define EFUSE_BISR_END BIT_POS(39, 31) + +#define EFUSE_USER_CTRL_START BIT_POS(16, 0) +#define EFUSE_USER_CTRL_END BIT_POS(16, 16) +#define EFUSE_USER_CTRL_MASK ((uint32_t)MAKE_64BIT_MASK(0, 17)) + +#define EFUSE_PUF_CHASH_START BIT_POS(20, 0) +#define EFUSE_PUF_CHASH_END BIT_POS(20, 31) +#define EFUSE_PUF_MISC_START BIT_POS(21, 0) +#define EFUSE_PUF_MISC_END BIT_POS(21, 31) +#define EFUSE_PUF_SYN_WRLK BIT_POS(21, 30) + +#define EFUSE_SPK_START BIT_POS(23, 0) +#define EFUSE_SPK_END BIT_POS(23, 31) + +#define EFUSE_PPK0_START BIT_POS(40, 0) +#define EFUSE_PPK0_END BIT_POS(51, 31) +#define EFUSE_PPK1_START BIT_POS(52, 0) +#define EFUSE_PPK1_END BIT_POS(63, 31) + +#define EFUSE_CACHE_FLD(s, reg, field) \ + ARRAY_FIELD_DP32((s)->regs, reg, field, \ + (xlnx_efuse_get_row((s->efuse), EFUSE_ ## field) \ + >> (EFUSE_ ## field % 32))) + +#define EFUSE_CACHE_BIT(s, reg, field) \ + ARRAY_FIELD_DP32((s)->regs, reg, field, xlnx_efuse_get_bit((s->efuse), \ + EFUSE_ ## field)) + +#define FBIT_UNKNOWN (~0) + +QEMU_BUILD_BUG_ON(R_MAX != ARRAY_SIZE(((XlnxZynqMPEFuse *)0)->regs)); + +static void update_tbit_status(XlnxZynqMPEFuse *s) +{ + unsigned int check = xlnx_efuse_tbits_check(s->efuse); + uint32_t val = s->regs[R_STATUS]; + + val = FIELD_DP32(val, STATUS, EFUSE_0_TBIT, !!(check & (1 << 0))); + val = FIELD_DP32(val, STATUS, EFUSE_2_TBIT, !!(check & (1 << 1))); + val = FIELD_DP32(val, STATUS, EFUSE_3_TBIT, !!(check & (1 << 2))); + + s->regs[R_STATUS] = val; +} + +/* Update the u32 array from efuse bits. Slow but simple approach. */ +static void cache_sync_u32(XlnxZynqMPEFuse *s, unsigned int r_start, + unsigned int f_start, unsigned int f_end, + unsigned int f_written) +{ + uint32_t *u32 = &s->regs[r_start]; + unsigned int fbit, wbits = 0, u32_off = 0; + + /* Avoid working on bits that are not relevant. */ + if (f_written != FBIT_UNKNOWN + && (f_written < f_start || f_written > f_end)) { + return; + } + + for (fbit = f_start; fbit <= f_end; fbit++, wbits++) { + if (wbits == 32) { + /* Update the key offset. */ + u32_off += 1; + wbits = 0; + } + u32[u32_off] |= xlnx_efuse_get_bit(s->efuse, fbit) << wbits; + } +} + +/* + * Keep the syncs in bit order so we can bail out for the + * slower ones. + */ +static void zynqmp_efuse_sync_cache(XlnxZynqMPEFuse *s, unsigned int bit) +{ + EFUSE_CACHE_BIT(s, SEC_CTRL, AES_RDLK); + EFUSE_CACHE_BIT(s, SEC_CTRL, AES_WRLK); + EFUSE_CACHE_BIT(s, SEC_CTRL, ENC_ONLY); + EFUSE_CACHE_BIT(s, SEC_CTRL, BBRAM_DIS); + EFUSE_CACHE_BIT(s, SEC_CTRL, ERROR_DIS); + EFUSE_CACHE_BIT(s, SEC_CTRL, JTAG_DIS); + EFUSE_CACHE_BIT(s, SEC_CTRL, DFT_DIS); + EFUSE_CACHE_BIT(s, SEC_CTRL, PROG_GATE_0); + EFUSE_CACHE_BIT(s, SEC_CTRL, PROG_GATE_1); + EFUSE_CACHE_BIT(s, SEC_CTRL, PROG_GATE_2); + EFUSE_CACHE_BIT(s, SEC_CTRL, SEC_LOCK); + EFUSE_CACHE_BIT(s, SEC_CTRL, PPK0_WRLK); + EFUSE_CACHE_BIT(s, SEC_CTRL, PPK1_WRLK); + + EFUSE_CACHE_FLD(s, SEC_CTRL, RSA_EN); + EFUSE_CACHE_FLD(s, SEC_CTRL, PPK0_INVLD); + EFUSE_CACHE_FLD(s, SEC_CTRL, PPK1_INVLD); + + /* Update the tbits. 
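cache_sync_u32() above copies a contiguous range of fuse bits into consecutive 32-bit cache registers, 32 bits per word, which is how multi-word items such as the DNA (fuse rows 3 to 5) end up in DNA_0..DNA_2. A stripped-down, self-contained version of the same packing loop, with get_bit() standing in for xlnx_efuse_get_bit():

    #include <stdint.h>
    #include <stdio.h>

    #define BIT_POS(row, col)  ((row) * 32 + (col))

    /* Stand-in fuse array: pretend only fuse bit (3, 5) is programmed. */
    static uint32_t get_bit(unsigned int fbit)
    {
        return fbit == BIT_POS(3, 5);
    }

    /* Same packing as cache_sync_u32(): bits f_start..f_end ORed into out[]. */
    static void sync_u32(uint32_t *out, unsigned int f_start, unsigned int f_end)
    {
        unsigned int fbit, wbits = 0, idx = 0;

        for (fbit = f_start; fbit <= f_end; fbit++, wbits++) {
            if (wbits == 32) {
                idx++;          /* move to the next cache word */
                wbits = 0;
            }
            out[idx] |= get_bit(fbit) << wbits;
        }
    }

    int main(void)
    {
        uint32_t dna[3] = { 0 };

        sync_u32(dna, BIT_POS(3, 0), BIT_POS(5, 31));  /* EFUSE_DNA_START..END */
        printf("DNA_0=0x%08x\n", dna[0]);              /* 0x00000020: bit 5 set */
        return 0;
    }
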
*/ + update_tbit_status(s); + + /* Sync the various areas. */ + s->regs[R_MISC_USER_CTRL] = xlnx_efuse_get_row(s->efuse, + EFUSE_USER_CTRL_START) + & EFUSE_USER_CTRL_MASK; + s->regs[R_PUF_CHASH] = xlnx_efuse_get_row(s->efuse, EFUSE_PUF_CHASH_START); + s->regs[R_PUF_MISC] = xlnx_efuse_get_row(s->efuse, EFUSE_PUF_MISC_START); + + cache_sync_u32(s, R_DNA_0, EFUSE_DNA_START, EFUSE_DNA_END, bit); + + if (bit < EFUSE_AES_START) { + return; + } + + cache_sync_u32(s, R_ROM_RSVD, EFUSE_ROM_START, EFUSE_ROM_END, bit); + cache_sync_u32(s, R_IPDISABLE, EFUSE_IPDIS_START, EFUSE_IPDIS_END, bit); + cache_sync_u32(s, R_USER_0, EFUSE_USER_START, EFUSE_USER_END, bit); + cache_sync_u32(s, R_SPK_ID, EFUSE_SPK_START, EFUSE_SPK_END, bit); + cache_sync_u32(s, R_PPK0_0, EFUSE_PPK0_START, EFUSE_PPK0_END, bit); + cache_sync_u32(s, R_PPK1_0, EFUSE_PPK1_START, EFUSE_PPK1_END, bit); +} + +static void zynqmp_efuse_update_irq(XlnxZynqMPEFuse *s) +{ + bool pending = s->regs[R_EFUSE_ISR] & s->regs[R_EFUSE_IMR]; + qemu_set_irq(s->irq, pending); +} + +static void zynqmp_efuse_isr_postw(RegisterInfo *reg, uint64_t val64) +{ + XlnxZynqMPEFuse *s = XLNX_ZYNQMP_EFUSE(reg->opaque); + zynqmp_efuse_update_irq(s); +} + +static uint64_t zynqmp_efuse_ier_prew(RegisterInfo *reg, uint64_t val64) +{ + XlnxZynqMPEFuse *s = XLNX_ZYNQMP_EFUSE(reg->opaque); + uint32_t val = val64; + + s->regs[R_EFUSE_IMR] |= val; + zynqmp_efuse_update_irq(s); + return 0; +} + +static uint64_t zynqmp_efuse_idr_prew(RegisterInfo *reg, uint64_t val64) +{ + XlnxZynqMPEFuse *s = XLNX_ZYNQMP_EFUSE(reg->opaque); + uint32_t val = val64; + + s->regs[R_EFUSE_IMR] &= ~val; + zynqmp_efuse_update_irq(s); + return 0; +} + +static void zynqmp_efuse_pgm_addr_postw(RegisterInfo *reg, uint64_t val64) +{ + XlnxZynqMPEFuse *s = XLNX_ZYNQMP_EFUSE(reg->opaque); + unsigned bit = val64; + unsigned page = FIELD_EX32(bit, EFUSE_PGM_ADDR, EFUSE); + bool puf_prot = false; + const char *errmsg = NULL; + + /* Allow only valid array, and adjust for skipped array 1 */ + switch (page) { + case 0: + break; + case 2 ... 
3: + bit = FIELD_DP32(bit, EFUSE_PGM_ADDR, EFUSE, page - 1); + puf_prot = xlnx_efuse_get_bit(s->efuse, EFUSE_PUF_SYN_WRLK); + break; + default: + errmsg = "Invalid address"; + goto pgm_done; + } + + if (ARRAY_FIELD_EX32(s->regs, WR_LOCK, LOCK)) { + errmsg = "Array write-locked"; + goto pgm_done; + } + + if (!ARRAY_FIELD_EX32(s->regs, CFG, PGM_EN)) { + errmsg = "Array pgm-disabled"; + goto pgm_done; + } + + if (puf_prot) { + errmsg = "PUF_HD-store write-locked"; + goto pgm_done; + } + + if (ARRAY_FIELD_EX32(s->regs, SEC_CTRL, AES_WRLK) + && bit >= EFUSE_AES_START && bit <= EFUSE_AES_END) { + errmsg = "AES key-store Write-locked"; + goto pgm_done; + } + + if (!xlnx_efuse_set_bit(s->efuse, bit)) { + errmsg = "Write failed"; + } + + pgm_done: + if (!errmsg) { + ARRAY_FIELD_DP32(s->regs, EFUSE_ISR, PGM_ERROR, 0); + } else { + g_autofree char *path = object_get_canonical_path(OBJECT(s)); + + ARRAY_FIELD_DP32(s->regs, EFUSE_ISR, PGM_ERROR, 1); + qemu_log_mask(LOG_GUEST_ERROR, + "%s - eFuse write error: %s; addr=0x%x\n", + path, errmsg, (unsigned)val64); + } + + ARRAY_FIELD_DP32(s->regs, EFUSE_ISR, PGM_DONE, 1); + zynqmp_efuse_update_irq(s); +} + +static void zynqmp_efuse_rd_addr_postw(RegisterInfo *reg, uint64_t val64) +{ + XlnxZynqMPEFuse *s = XLNX_ZYNQMP_EFUSE(reg->opaque); + g_autofree char *path = NULL; + + /* + * Grant reads only to allowed bits; reference sources: + * 1/ XilSKey - XilSKey_ZynqMp_EfusePs_ReadRow() + * 2/ UG1085, v2.0, table 12-13 + * (note: enumerates the masks as per described in + * references to avoid mental translation). + */ +#define COL_MASK(L_, H_) \ + ((uint32_t)MAKE_64BIT_MASK((L_), (1 + (H_) - (L_)))) + + static const uint32_t ary0_col_mask[] = { + /* XilSKey - XSK_ZYNQMP_EFUSEPS_TBITS_ROW */ + [0] = COL_MASK(28, 31), + + /* XilSKey - XSK_ZYNQMP_EFUSEPS_USR{0:7}_FUSE_ROW */ + [8] = COL_MASK(0, 31), [9] = COL_MASK(0, 31), + [10] = COL_MASK(0, 31), [11] = COL_MASK(0, 31), + [12] = COL_MASK(0, 31), [13] = COL_MASK(0, 31), + [14] = COL_MASK(0, 31), [15] = COL_MASK(0, 31), + + /* XilSKey - XSK_ZYNQMP_EFUSEPS_MISC_USR_CTRL_ROW */ + [16] = COL_MASK(0, 7) | COL_MASK(10, 16), + + /* XilSKey - XSK_ZYNQMP_EFUSEPS_PBR_BOOT_ERR_ROW */ + [17] = COL_MASK(0, 2), + + /* XilSKey - XSK_ZYNQMP_EFUSEPS_PUF_CHASH_ROW */ + [20] = COL_MASK(0, 31), + + /* XilSKey - XSK_ZYNQMP_EFUSEPS_PUF_AUX_ROW */ + [21] = COL_MASK(0, 23) | COL_MASK(29, 31), + + /* XilSKey - XSK_ZYNQMP_EFUSEPS_SEC_CTRL_ROW */ + [22] = COL_MASK(0, 31), + + /* XilSKey - XSK_ZYNQMP_EFUSEPS_SPK_ID_ROW */ + [23] = COL_MASK(0, 31), + + /* XilSKey - XSK_ZYNQMP_EFUSEPS_PPK0_START_ROW */ + [40] = COL_MASK(0, 31), [41] = COL_MASK(0, 31), + [42] = COL_MASK(0, 31), [43] = COL_MASK(0, 31), + [44] = COL_MASK(0, 31), [45] = COL_MASK(0, 31), + [46] = COL_MASK(0, 31), [47] = COL_MASK(0, 31), + [48] = COL_MASK(0, 31), [49] = COL_MASK(0, 31), + [50] = COL_MASK(0, 31), [51] = COL_MASK(0, 31), + + /* XilSKey - XSK_ZYNQMP_EFUSEPS_PPK1_START_ROW */ + [52] = COL_MASK(0, 31), [53] = COL_MASK(0, 31), + [54] = COL_MASK(0, 31), [55] = COL_MASK(0, 31), + [56] = COL_MASK(0, 31), [57] = COL_MASK(0, 31), + [58] = COL_MASK(0, 31), [59] = COL_MASK(0, 31), + [60] = COL_MASK(0, 31), [61] = COL_MASK(0, 31), + [62] = COL_MASK(0, 31), [63] = COL_MASK(0, 31), + }; + + uint32_t col_mask = COL_MASK(0, 31); +#undef COL_MASK + + uint32_t efuse_idx = s->regs[R_EFUSE_RD_ADDR]; + uint32_t efuse_ary = FIELD_EX32(efuse_idx, EFUSE_RD_ADDR, EFUSE); + uint32_t efuse_row = FIELD_EX32(efuse_idx, EFUSE_RD_ADDR, ROW); + + switch (efuse_ary) { + case 0: /* Various */ + if 
(efuse_row >= ARRAY_SIZE(ary0_col_mask)) { + goto denied; + } + + col_mask = ary0_col_mask[efuse_row]; + if (!col_mask) { + goto denied; + } + break; + case 2: /* PUF helper data, adjust for skipped array 1 */ + case 3: + val64 = FIELD_DP32(efuse_idx, EFUSE_RD_ADDR, EFUSE, efuse_ary - 1); + break; + default: + goto denied; + } + + s->regs[R_EFUSE_RD_DATA] = xlnx_efuse_get_row(s->efuse, val64) & col_mask; + + ARRAY_FIELD_DP32(s->regs, EFUSE_ISR, RD_ERROR, 0); + ARRAY_FIELD_DP32(s->regs, EFUSE_ISR, RD_DONE, 1); + zynqmp_efuse_update_irq(s); + return; + + denied: + path = object_get_canonical_path(OBJECT(s)); + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Denied efuse read from array %u, row %u\n", + path, efuse_ary, efuse_row); + + s->regs[R_EFUSE_RD_DATA] = 0; + + ARRAY_FIELD_DP32(s->regs, EFUSE_ISR, RD_ERROR, 1); + ARRAY_FIELD_DP32(s->regs, EFUSE_ISR, RD_DONE, 0); + zynqmp_efuse_update_irq(s); +} + +static void zynqmp_efuse_aes_crc_postw(RegisterInfo *reg, uint64_t val64) +{ + XlnxZynqMPEFuse *s = XLNX_ZYNQMP_EFUSE(reg->opaque); + bool ok; + + ok = xlnx_efuse_k256_check(s->efuse, (uint32_t)val64, EFUSE_AES_START); + + ARRAY_FIELD_DP32(s->regs, STATUS, AES_CRC_PASS, (ok ? 1 : 0)); + ARRAY_FIELD_DP32(s->regs, STATUS, AES_CRC_DONE, 1); + + s->regs[R_EFUSE_AES_CRC] = 0; /* crc value is write-only */ +} + +static uint64_t zynqmp_efuse_cache_load_prew(RegisterInfo *reg, + uint64_t valu64) +{ + XlnxZynqMPEFuse *s = XLNX_ZYNQMP_EFUSE(reg->opaque); + + if (valu64 & R_EFUSE_CACHE_LOAD_LOAD_MASK) { + zynqmp_efuse_sync_cache(s, FBIT_UNKNOWN); + ARRAY_FIELD_DP32(s->regs, STATUS, CACHE_DONE, 1); + zynqmp_efuse_update_irq(s); + } + + return 0; +} + +static uint64_t zynqmp_efuse_wr_lock_prew(RegisterInfo *reg, uint64_t val) +{ + return val == 0xDF0D ? 0 : 1; +} + +static RegisterAccessInfo zynqmp_efuse_regs_info[] = { + { .name = "WR_LOCK", .addr = A_WR_LOCK, + .reset = 0x1, + .pre_write = zynqmp_efuse_wr_lock_prew, + },{ .name = "CFG", .addr = A_CFG, + },{ .name = "STATUS", .addr = A_STATUS, + .rsvd = 0x8, + .ro = 0xff, + },{ .name = "EFUSE_PGM_ADDR", .addr = A_EFUSE_PGM_ADDR, + .post_write = zynqmp_efuse_pgm_addr_postw + },{ .name = "EFUSE_RD_ADDR", .addr = A_EFUSE_RD_ADDR, + .rsvd = 0x1f, + .post_write = zynqmp_efuse_rd_addr_postw, + },{ .name = "EFUSE_RD_DATA", .addr = A_EFUSE_RD_DATA, + .ro = 0xffffffff, + },{ .name = "TPGM", .addr = A_TPGM, + },{ .name = "TRD", .addr = A_TRD, + .reset = 0x1b, + },{ .name = "TSU_H_PS", .addr = A_TSU_H_PS, + .reset = 0xff, + },{ .name = "TSU_H_PS_CS", .addr = A_TSU_H_PS_CS, + .reset = 0xb, + },{ .name = "TSU_H_CS", .addr = A_TSU_H_CS, + .reset = 0x7, + },{ .name = "EFUSE_ISR", .addr = A_EFUSE_ISR, + .rsvd = 0x7fffffe0, + .w1c = 0x8000001f, + .post_write = zynqmp_efuse_isr_postw, + },{ .name = "EFUSE_IMR", .addr = A_EFUSE_IMR, + .reset = 0x8000001f, + .rsvd = 0x7fffffe0, + .ro = 0xffffffff, + },{ .name = "EFUSE_IER", .addr = A_EFUSE_IER, + .rsvd = 0x7fffffe0, + .pre_write = zynqmp_efuse_ier_prew, + },{ .name = "EFUSE_IDR", .addr = A_EFUSE_IDR, + .rsvd = 0x7fffffe0, + .pre_write = zynqmp_efuse_idr_prew, + },{ .name = "EFUSE_CACHE_LOAD", .addr = A_EFUSE_CACHE_LOAD, + .pre_write = zynqmp_efuse_cache_load_prew, + },{ .name = "EFUSE_PGM_LOCK", .addr = A_EFUSE_PGM_LOCK, + },{ .name = "EFUSE_AES_CRC", .addr = A_EFUSE_AES_CRC, + .post_write = zynqmp_efuse_aes_crc_postw, + },{ .name = "EFUSE_TBITS_PRGRMG_EN", .addr = A_EFUSE_TBITS_PRGRMG_EN, + .reset = R_EFUSE_TBITS_PRGRMG_EN_TBITS_PRGRMG_EN_MASK, + },{ .name = "DNA_0", .addr = A_DNA_0, + .ro = 0xffffffff, + },{ .name = "DNA_1", .addr 
= A_DNA_1, + .ro = 0xffffffff, + },{ .name = "DNA_2", .addr = A_DNA_2, + .ro = 0xffffffff, + },{ .name = "IPDISABLE", .addr = A_IPDISABLE, + .ro = 0xffffffff, + },{ .name = "SYSOSC_CTRL", .addr = A_SYSOSC_CTRL, + .ro = 0xffffffff, + },{ .name = "USER_0", .addr = A_USER_0, + .ro = 0xffffffff, + },{ .name = "USER_1", .addr = A_USER_1, + .ro = 0xffffffff, + },{ .name = "USER_2", .addr = A_USER_2, + .ro = 0xffffffff, + },{ .name = "USER_3", .addr = A_USER_3, + .ro = 0xffffffff, + },{ .name = "USER_4", .addr = A_USER_4, + .ro = 0xffffffff, + },{ .name = "USER_5", .addr = A_USER_5, + .ro = 0xffffffff, + },{ .name = "USER_6", .addr = A_USER_6, + .ro = 0xffffffff, + },{ .name = "USER_7", .addr = A_USER_7, + .ro = 0xffffffff, + },{ .name = "MISC_USER_CTRL", .addr = A_MISC_USER_CTRL, + .ro = 0xffffffff, + },{ .name = "ROM_RSVD", .addr = A_ROM_RSVD, + .ro = 0xffffffff, + },{ .name = "PUF_CHASH", .addr = A_PUF_CHASH, + .ro = 0xffffffff, + },{ .name = "PUF_MISC", .addr = A_PUF_MISC, + .ro = 0xffffffff, + },{ .name = "SEC_CTRL", .addr = A_SEC_CTRL, + .ro = 0xffffffff, + },{ .name = "SPK_ID", .addr = A_SPK_ID, + .ro = 0xffffffff, + },{ .name = "PPK0_0", .addr = A_PPK0_0, + .ro = 0xffffffff, + },{ .name = "PPK0_1", .addr = A_PPK0_1, + .ro = 0xffffffff, + },{ .name = "PPK0_2", .addr = A_PPK0_2, + .ro = 0xffffffff, + },{ .name = "PPK0_3", .addr = A_PPK0_3, + .ro = 0xffffffff, + },{ .name = "PPK0_4", .addr = A_PPK0_4, + .ro = 0xffffffff, + },{ .name = "PPK0_5", .addr = A_PPK0_5, + .ro = 0xffffffff, + },{ .name = "PPK0_6", .addr = A_PPK0_6, + .ro = 0xffffffff, + },{ .name = "PPK0_7", .addr = A_PPK0_7, + .ro = 0xffffffff, + },{ .name = "PPK0_8", .addr = A_PPK0_8, + .ro = 0xffffffff, + },{ .name = "PPK0_9", .addr = A_PPK0_9, + .ro = 0xffffffff, + },{ .name = "PPK0_10", .addr = A_PPK0_10, + .ro = 0xffffffff, + },{ .name = "PPK0_11", .addr = A_PPK0_11, + .ro = 0xffffffff, + },{ .name = "PPK1_0", .addr = A_PPK1_0, + .ro = 0xffffffff, + },{ .name = "PPK1_1", .addr = A_PPK1_1, + .ro = 0xffffffff, + },{ .name = "PPK1_2", .addr = A_PPK1_2, + .ro = 0xffffffff, + },{ .name = "PPK1_3", .addr = A_PPK1_3, + .ro = 0xffffffff, + },{ .name = "PPK1_4", .addr = A_PPK1_4, + .ro = 0xffffffff, + },{ .name = "PPK1_5", .addr = A_PPK1_5, + .ro = 0xffffffff, + },{ .name = "PPK1_6", .addr = A_PPK1_6, + .ro = 0xffffffff, + },{ .name = "PPK1_7", .addr = A_PPK1_7, + .ro = 0xffffffff, + },{ .name = "PPK1_8", .addr = A_PPK1_8, + .ro = 0xffffffff, + },{ .name = "PPK1_9", .addr = A_PPK1_9, + .ro = 0xffffffff, + },{ .name = "PPK1_10", .addr = A_PPK1_10, + .ro = 0xffffffff, + },{ .name = "PPK1_11", .addr = A_PPK1_11, + .ro = 0xffffffff, + } +}; + +static void zynqmp_efuse_reg_write(void *opaque, hwaddr addr, + uint64_t data, unsigned size) +{ + RegisterInfoArray *reg_array = opaque; + XlnxZynqMPEFuse *s; + Object *dev; + + assert(reg_array != NULL); + + dev = reg_array->mem.owner; + assert(dev); + + s = XLNX_ZYNQMP_EFUSE(dev); + + if (addr != A_WR_LOCK && s->regs[R_WR_LOCK]) { + g_autofree char *path = object_get_canonical_path(OBJECT(s)); + + qemu_log_mask(LOG_GUEST_ERROR, + "%s[reg_0x%02lx]: Attempt to write locked register.\n", + path, (long)addr); + } else { + register_write_memory(opaque, addr, data, size); + } +} + +static const MemoryRegionOps zynqmp_efuse_ops = { + .read = register_read_memory, + .write = zynqmp_efuse_reg_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + }, +}; + +static void zynqmp_efuse_register_reset(RegisterInfo *reg) +{ + if (!reg->data || !reg->access) { + 
return; + } + + /* Reset must not trigger some registers' writers */ + switch (reg->access->addr) { + case A_EFUSE_AES_CRC: + *(uint32_t *)reg->data = reg->access->reset; + return; + } + + register_reset(reg); +} + +static void zynqmp_efuse_reset(DeviceState *dev) +{ + XlnxZynqMPEFuse *s = XLNX_ZYNQMP_EFUSE(dev); + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(s->regs_info); ++i) { + zynqmp_efuse_register_reset(&s->regs_info[i]); + } + + zynqmp_efuse_sync_cache(s, FBIT_UNKNOWN); + ARRAY_FIELD_DP32(s->regs, STATUS, CACHE_DONE, 1); + zynqmp_efuse_update_irq(s); +} + +static void zynqmp_efuse_realize(DeviceState *dev, Error **errp) +{ + XlnxZynqMPEFuse *s = XLNX_ZYNQMP_EFUSE(dev); + + if (!s->efuse) { + g_autofree char *path = object_get_canonical_path(OBJECT(s)); + + error_setg(errp, "%s.efuse: link property not connected to XLNX-EFUSE", + path); + return; + } + + s->efuse->dev = dev; +} + +static void zynqmp_efuse_init(Object *obj) +{ + XlnxZynqMPEFuse *s = XLNX_ZYNQMP_EFUSE(obj); + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + RegisterInfoArray *reg_array; + + reg_array = + register_init_block32(DEVICE(obj), zynqmp_efuse_regs_info, + ARRAY_SIZE(zynqmp_efuse_regs_info), + s->regs_info, s->regs, + &zynqmp_efuse_ops, + ZYNQMP_EFUSE_ERR_DEBUG, + R_MAX * 4); + + sysbus_init_mmio(sbd, &reg_array->mem); + sysbus_init_irq(sbd, &s->irq); +} + +static const VMStateDescription vmstate_efuse = { + .name = TYPE_XLNX_ZYNQMP_EFUSE, + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32_ARRAY(regs, XlnxZynqMPEFuse, R_MAX), + VMSTATE_END_OF_LIST(), + } +}; + +static Property zynqmp_efuse_props[] = { + DEFINE_PROP_LINK("efuse", + XlnxZynqMPEFuse, efuse, + TYPE_XLNX_EFUSE, XlnxEFuse *), + + DEFINE_PROP_END_OF_LIST(), +}; + +static void zynqmp_efuse_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->reset = zynqmp_efuse_reset; + dc->realize = zynqmp_efuse_realize; + dc->vmsd = &vmstate_efuse; + device_class_set_props(dc, zynqmp_efuse_props); +} + + +static const TypeInfo efuse_info = { + .name = TYPE_XLNX_ZYNQMP_EFUSE, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(XlnxZynqMPEFuse), + .class_init = zynqmp_efuse_class_init, + .instance_init = zynqmp_efuse_init, +}; + +static void efuse_register_types(void) +{ + type_register_static(&efuse_info); +} + +type_init(efuse_register_types) diff --git a/hw/pci-bridge/pci_expander_bridge.c b/hw/pci-bridge/pci_expander_bridge.c index 7112dc3062..10e6e7c2ab 100644 --- a/hw/pci-bridge/pci_expander_bridge.c +++ b/hw/pci-bridge/pci_expander_bridge.c @@ -245,7 +245,7 @@ static void pxb_dev_realize_common(PCIDevice *dev, bool pcie, Error **errp) } else { bus = pci_root_bus_new(ds, "pxb-internal", NULL, NULL, 0, TYPE_PXB_BUS); bds = qdev_new("pci-bridge"); - bds->id = dev_name; + bds->id = g_strdup(dev_name); qdev_prop_set_uint8(bds, PCI_BRIDGE_DEV_PROP_CHASSIS_NR, pxb->bus_nr); qdev_prop_set_bit(bds, PCI_BRIDGE_DEV_PROP_SHPC, false); } diff --git a/hw/pci-host/mv64361.c b/hw/pci-host/mv64361.c index 92b0f5d047..00b3ff7d90 100644 --- a/hw/pci-host/mv64361.c +++ b/hw/pci-host/mv64361.c @@ -869,6 +869,7 @@ static void mv64361_realize(DeviceState *dev, Error **errp) s->base_addr_enable = 0x1fffff; memory_region_init_io(&s->regs, OBJECT(s), &mv64361_ops, s, TYPE_MV64361, 0x10000); + sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->regs); for (i = 0; i < 2; i++) { g_autofree char *name = g_strdup_printf("pcihost%d", i); object_initialize_child(OBJECT(dev), name, &s->pci[i], diff --git 
a/hw/pci-host/raven.c b/hw/pci-host/raven.c index 3be27f0a14..6e514f75eb 100644 --- a/hw/pci-host/raven.c +++ b/hw/pci-host/raven.c @@ -300,8 +300,8 @@ static void raven_pcihost_initfn(Object *obj) memory_region_add_subregion_overlap(address_space_mem, PCI_IO_BASE_ADDR, &s->pci_io_non_contiguous, 1); memory_region_add_subregion(address_space_mem, 0xc0000000, &s->pci_memory); - pci_root_bus_new_inplace(&s->pci_bus, sizeof(s->pci_bus), DEVICE(obj), NULL, - &s->pci_memory, &s->pci_io, 0, TYPE_PCI_BUS); + pci_root_bus_init(&s->pci_bus, sizeof(s->pci_bus), DEVICE(obj), NULL, + &s->pci_memory, &s->pci_io, 0, TYPE_PCI_BUS); /* Bus master address space */ memory_region_init(&s->bm, obj, "bm-raven", 4 * GiB); diff --git a/hw/pci-host/sh_pci.c b/hw/pci-host/sh_pci.c index 08c1562e22..719d6ca2a6 100644 --- a/hw/pci-host/sh_pci.c +++ b/hw/pci-host/sh_pci.c @@ -49,13 +49,12 @@ struct SHPCIState { uint32_t iobr; }; -static void sh_pci_reg_write (void *p, hwaddr addr, uint64_t val, - unsigned size) +static void sh_pci_reg_write(void *p, hwaddr addr, uint64_t val, unsigned size) { SHPCIState *pcic = p; PCIHostState *phb = PCI_HOST_BRIDGE(pcic); - switch(addr) { + switch (addr) { case 0 ... 0xfc: stl_le_p(pcic->dev->config + addr, val); break; @@ -75,13 +74,12 @@ static void sh_pci_reg_write (void *p, hwaddr addr, uint64_t val, } } -static uint64_t sh_pci_reg_read (void *p, hwaddr addr, - unsigned size) +static uint64_t sh_pci_reg_read(void *p, hwaddr addr, unsigned size) { SHPCIState *pcic = p; PCIHostState *phb = PCI_HOST_BRIDGE(pcic); - switch(addr) { + switch (addr) { case 0 ... 0xfc: return ldl_le_p(pcic->dev->config + addr); case 0x1c0: diff --git a/hw/pci-host/versatile.c b/hw/pci-host/versatile.c index 3553277f94..f66384fa02 100644 --- a/hw/pci-host/versatile.c +++ b/hw/pci-host/versatile.c @@ -405,9 +405,9 @@ static void pci_vpb_realize(DeviceState *dev, Error **errp) memory_region_init(&s->pci_io_space, OBJECT(s), "pci_io", 4 * GiB); memory_region_init(&s->pci_mem_space, OBJECT(s), "pci_mem", 4 * GiB); - pci_root_bus_new_inplace(&s->pci_bus, sizeof(s->pci_bus), dev, "pci", - &s->pci_mem_space, &s->pci_io_space, - PCI_DEVFN(11, 0), TYPE_PCI_BUS); + pci_root_bus_init(&s->pci_bus, sizeof(s->pci_bus), dev, "pci", + &s->pci_mem_space, &s->pci_io_space, + PCI_DEVFN(11, 0), TYPE_PCI_BUS); h->bus = &s->pci_bus; object_initialize(&s->pci_dev, sizeof(s->pci_dev), TYPE_VERSATILE_PCI_HOST); diff --git a/hw/pci/pci.c b/hw/pci/pci.c index 23d2ae2ab2..4a84e478ce 100644 --- a/hw/pci/pci.c +++ b/hw/pci/pci.c @@ -432,10 +432,10 @@ bool pci_bus_bypass_iommu(PCIBus *bus) return host_bridge->bypass_iommu; } -static void pci_root_bus_init(PCIBus *bus, DeviceState *parent, - MemoryRegion *address_space_mem, - MemoryRegion *address_space_io, - uint8_t devfn_min) +static void pci_root_bus_internal_init(PCIBus *bus, DeviceState *parent, + MemoryRegion *address_space_mem, + MemoryRegion *address_space_io, + uint8_t devfn_min) { assert(PCI_FUNC(devfn_min) == 0); bus->devfn_min = devfn_min; @@ -460,15 +460,15 @@ bool pci_bus_is_express(PCIBus *bus) return object_dynamic_cast(OBJECT(bus), TYPE_PCIE_BUS); } -void pci_root_bus_new_inplace(PCIBus *bus, size_t bus_size, DeviceState *parent, - const char *name, - MemoryRegion *address_space_mem, - MemoryRegion *address_space_io, - uint8_t devfn_min, const char *typename) +void pci_root_bus_init(PCIBus *bus, size_t bus_size, DeviceState *parent, + const char *name, + MemoryRegion *address_space_mem, + MemoryRegion *address_space_io, + uint8_t devfn_min, const char *typename) { - 
qbus_create_inplace(bus, bus_size, typename, parent, name); - pci_root_bus_init(bus, parent, address_space_mem, address_space_io, - devfn_min); + qbus_init(bus, bus_size, typename, parent, name); + pci_root_bus_internal_init(bus, parent, address_space_mem, + address_space_io, devfn_min); } PCIBus *pci_root_bus_new(DeviceState *parent, const char *name, @@ -478,9 +478,9 @@ PCIBus *pci_root_bus_new(DeviceState *parent, const char *name, { PCIBus *bus; - bus = PCI_BUS(qbus_create(typename, parent, name)); - pci_root_bus_init(bus, parent, address_space_mem, address_space_io, - devfn_min); + bus = PCI_BUS(qbus_new(typename, parent, name)); + pci_root_bus_internal_init(bus, parent, address_space_mem, + address_space_io, devfn_min); return bus; } @@ -1654,11 +1654,9 @@ static const pci_class_desc pci_class_descriptions[] = { 0, NULL} }; -static void pci_for_each_device_under_bus_reverse(PCIBus *bus, - void (*fn)(PCIBus *b, - PCIDevice *d, - void *opaque), - void *opaque) +void pci_for_each_device_under_bus_reverse(PCIBus *bus, + pci_bus_dev_fn fn, + void *opaque) { PCIDevice *d; int devfn; @@ -1672,8 +1670,7 @@ static void pci_for_each_device_under_bus_reverse(PCIBus *bus, } void pci_for_each_device_reverse(PCIBus *bus, int bus_num, - void (*fn)(PCIBus *b, PCIDevice *d, void *opaque), - void *opaque) + pci_bus_dev_fn fn, void *opaque) { bus = pci_find_bus_nr(bus, bus_num); @@ -1682,10 +1679,8 @@ void pci_for_each_device_reverse(PCIBus *bus, int bus_num, } } -static void pci_for_each_device_under_bus(PCIBus *bus, - void (*fn)(PCIBus *b, PCIDevice *d, - void *opaque), - void *opaque) +void pci_for_each_device_under_bus(PCIBus *bus, + pci_bus_dev_fn fn, void *opaque) { PCIDevice *d; int devfn; @@ -1699,8 +1694,7 @@ static void pci_for_each_device_under_bus(PCIBus *bus, } void pci_for_each_device(PCIBus *bus, int bus_num, - void (*fn)(PCIBus *b, PCIDevice *d, void *opaque), - void *opaque) + pci_bus_dev_fn fn, void *opaque) { bus = pci_find_bus_nr(bus, bus_num); @@ -2078,10 +2072,8 @@ static PCIBus *pci_find_bus_nr(PCIBus *bus, int bus_num) return NULL; } -void pci_for_each_bus_depth_first(PCIBus *bus, - void *(*begin)(PCIBus *bus, void *parent_state), - void (*end)(PCIBus *bus, void *state), - void *parent_state) +void pci_for_each_bus_depth_first(PCIBus *bus, pci_bus_ret_fn begin, + pci_bus_fn end, void *parent_state) { PCIBus *sec; void *state; diff --git a/hw/pci/pci_bridge.c b/hw/pci/pci_bridge.c index 3789c17edc..da34c8ebcd 100644 --- a/hw/pci/pci_bridge.c +++ b/hw/pci/pci_bridge.c @@ -374,8 +374,8 @@ void pci_bridge_initfn(PCIDevice *dev, const char *typename) br->bus_name = dev->qdev.id; } - qbus_create_inplace(sec_bus, sizeof(br->sec_bus), typename, DEVICE(dev), - br->bus_name); + qbus_init(sec_bus, sizeof(br->sec_bus), typename, DEVICE(dev), + br->bus_name); sec_bus->parent_dev = dev; sec_bus->map_irq = br->map_irq ? 
br->map_irq : pci_swizzle_map_irq_fn; sec_bus->address_space_mem = &br->address_space_mem; @@ -448,11 +448,11 @@ int pci_bridge_qemu_reserve_cap_init(PCIDevice *dev, int cap_offset, PCIBridgeQemuCap cap = { .len = cap_len, .type = REDHAT_PCI_CAP_RESOURCE_RESERVE, - .bus_res = res_reserve.bus, - .io = res_reserve.io, - .mem = res_reserve.mem_non_pref, - .mem_pref_32 = res_reserve.mem_pref_32, - .mem_pref_64 = res_reserve.mem_pref_64 + .bus_res = cpu_to_le32(res_reserve.bus), + .io = cpu_to_le64(res_reserve.io), + .mem = cpu_to_le32(res_reserve.mem_non_pref), + .mem_pref_32 = cpu_to_le32(res_reserve.mem_pref_32), + .mem_pref_64 = cpu_to_le64(res_reserve.mem_pref_64) }; int offset = pci_add_capability(dev, PCI_CAP_ID_VNDR, diff --git a/hw/pci/pcie.c b/hw/pci/pcie.c index 6e95d82903..914a9bf3d1 100644 --- a/hw/pci/pcie.c +++ b/hw/pci/pcie.c @@ -694,9 +694,7 @@ void pcie_cap_slot_write_config(PCIDevice *dev, (!(old_slt_ctl & PCI_EXP_SLTCTL_PCC) || (old_slt_ctl & PCI_EXP_SLTCTL_PIC_OFF) != PCI_EXP_SLTCTL_PIC_OFF)) { PCIBus *sec_bus = pci_bridge_get_sec_bus(PCI_BRIDGE(dev)); - pci_for_each_device(sec_bus, pci_bus_num(sec_bus), - pcie_unplug_device, NULL); - + pci_for_each_device_under_bus(sec_bus, pcie_unplug_device, NULL); pci_word_test_and_clear_mask(exp_cap + PCI_EXP_SLTSTA, PCI_EXP_SLTSTA_PDS); if (dev->cap_present & QEMU_PCIE_LNKSTA_DLLLA || diff --git a/hw/ppc/e500.c b/hw/ppc/e500.c index 95451414dd..960e7efcd3 100644 --- a/hw/ppc/e500.c +++ b/hw/ppc/e500.c @@ -1006,7 +1006,7 @@ void ppce500_init(MachineState *machine) /* Platform Bus Device */ if (pmc->has_platform_bus) { dev = qdev_new(TYPE_PLATFORM_BUS_DEVICE); - dev->id = TYPE_PLATFORM_BUS_DEVICE; + dev->id = g_strdup(TYPE_PLATFORM_BUS_DEVICE); qdev_prop_set_uint32(dev, "num_irqs", pmc->platform_bus_num_irqs); qdev_prop_set_uint32(dev, "mmio_size", pmc->platform_bus_size); sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); diff --git a/hw/ppc/pegasos2.c b/hw/ppc/pegasos2.c index b8ce859f1a..298e6b93e2 100644 --- a/hw/ppc/pegasos2.c +++ b/hw/ppc/pegasos2.c @@ -22,6 +22,8 @@ #include "hw/i2c/smbus_eeprom.h" #include "hw/qdev-properties.h" #include "sysemu/reset.h" +#include "sysemu/runstate.h" +#include "sysemu/qtest.h" #include "hw/boards.h" #include "hw/loader.h" #include "hw/fw-path-provider.h" @@ -31,6 +33,8 @@ #include "sysemu/kvm.h" #include "kvm_ppc.h" #include "exec/address-spaces.h" +#include "qom/qom-qobject.h" +#include "qapi/qmp/qdict.h" #include "trace.h" #include "qemu/datadir.h" #include "sysemu/device_tree.h" @@ -52,11 +56,13 @@ #define BUS_FREQ_HZ 133333333 +#define PCI0_CFG_ADDR 0xcf8 #define PCI0_MEM_BASE 0xc0000000 #define PCI0_MEM_SIZE 0x20000000 #define PCI0_IO_BASE 0xf8000000 #define PCI0_IO_SIZE 0x10000 +#define PCI1_CFG_ADDR 0xc78 #define PCI1_MEM_BASE 0x80000000 #define PCI1_MEM_SIZE 0x40000000 #define PCI1_IO_BASE 0xfe000000 @@ -117,6 +123,10 @@ static void pegasos2_init(MachineState *machine) qemu_register_reset(pegasos2_cpu_reset, pm->cpu); /* RAM */ + if (machine->ram_size > 2 * GiB) { + error_report("RAM size more than 2 GiB is not supported"); + exit(1); + } memory_region_add_subregion(get_system_memory(), 0, machine->ram); /* allocate and load firmware */ @@ -190,62 +200,58 @@ static void pegasos2_init(MachineState *machine) if (!pm->vof) { warn_report("Option -kernel may be ineffective with -bios."); } + } else if (pm->vof && !qtest_enabled()) { + warn_report("Using Virtual OpenFirmware but no -kernel option."); } + if (!pm->vof && machine->kernel_cmdline && machine->kernel_cmdline[0]) { 
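The pci_bridge.c hunk above wraps the resource-reserve capability fields in cpu_to_le32()/cpu_to_le64() because the capability structure ends up copied as raw bytes into PCI config space, which the guest reads as little-endian; without the conversion a big-endian host would expose byte-swapped values. A reduced illustration of the pattern (the struct is trimmed to two fields and to_le32() stands in for QEMU's cpu_to_le32()):

    #include <stdint.h>
    #include <string.h>

    /* Byte-swap only when the host is big-endian, as cpu_to_le32() does. */
    static uint32_t to_le32(uint32_t v)
    {
    #if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
        return __builtin_bswap32(v);
    #else
        return v;
    #endif
    }

    typedef struct {            /* trimmed stand-in for PCIBridgeQemuCap */
        uint32_t bus_res;
        uint32_t mem;
    } DemoCap;

    static void write_cap(uint8_t *config_space, uint32_t bus_res, uint32_t mem)
    {
        DemoCap cap = {
            .bus_res = to_le32(bus_res),   /* stored little-endian for the guest */
            .mem     = to_le32(mem),
        };

        memcpy(config_space, &cap, sizeof(cap));   /* config space is a byte array */
    }
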
warn_report("Option -append may be ineffective with -bios."); } } -static uint32_t pegasos2_pci_config_read(AddressSpace *as, int bus, +static uint32_t pegasos2_mv_reg_read(Pegasos2MachineState *pm, + uint32_t addr, uint32_t len) +{ + MemoryRegion *r = sysbus_mmio_get_region(SYS_BUS_DEVICE(pm->mv), 0); + uint64_t val = 0xffffffffULL; + memory_region_dispatch_read(r, addr, &val, size_memop(len) | MO_LE, + MEMTXATTRS_UNSPECIFIED); + return val; +} + +static void pegasos2_mv_reg_write(Pegasos2MachineState *pm, uint32_t addr, + uint32_t len, uint32_t val) +{ + MemoryRegion *r = sysbus_mmio_get_region(SYS_BUS_DEVICE(pm->mv), 0); + memory_region_dispatch_write(r, addr, val, size_memop(len) | MO_LE, + MEMTXATTRS_UNSPECIFIED); +} + +static uint32_t pegasos2_pci_config_read(Pegasos2MachineState *pm, int bus, uint32_t addr, uint32_t len) { - hwaddr pcicfg = (bus ? 0xf1000c78 : 0xf1000cf8); - uint32_t val = 0xffffffff; + hwaddr pcicfg = bus ? PCI1_CFG_ADDR : PCI0_CFG_ADDR; + uint64_t val = 0xffffffffULL; - stl_le_phys(as, pcicfg, addr | BIT(31)); - switch (len) { - case 4: - val = ldl_le_phys(as, pcicfg + 4); - break; - case 2: - val = lduw_le_phys(as, pcicfg + 4); - break; - case 1: - val = ldub_phys(as, pcicfg + 4); - break; - default: - qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid length\n", __func__); - break; + if (len <= 4) { + pegasos2_mv_reg_write(pm, pcicfg, 4, addr | BIT(31)); + val = pegasos2_mv_reg_read(pm, pcicfg + 4, len); } return val; } -static void pegasos2_pci_config_write(AddressSpace *as, int bus, uint32_t addr, - uint32_t len, uint32_t val) +static void pegasos2_pci_config_write(Pegasos2MachineState *pm, int bus, + uint32_t addr, uint32_t len, uint32_t val) { - hwaddr pcicfg = (bus ? 0xf1000c78 : 0xf1000cf8); + hwaddr pcicfg = bus ? PCI1_CFG_ADDR : PCI0_CFG_ADDR; - stl_le_phys(as, pcicfg, addr | BIT(31)); - switch (len) { - case 4: - stl_le_phys(as, pcicfg + 4, val); - break; - case 2: - stw_le_phys(as, pcicfg + 4, val); - break; - case 1: - stb_phys(as, pcicfg + 4, val); - break; - default: - qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid length\n", __func__); - break; - } + pegasos2_mv_reg_write(pm, pcicfg, 4, addr | BIT(31)); + pegasos2_mv_reg_write(pm, pcicfg + 4, len, val); } static void pegasos2_machine_reset(MachineState *machine) { Pegasos2MachineState *pm = PEGASOS2_MACHINE(machine); - AddressSpace *as = CPU(pm->cpu)->as; void *fdt; uint64_t d[2]; int sz; @@ -256,51 +262,51 @@ static void pegasos2_machine_reset(MachineState *machine) } /* Otherwise, set up devices that board firmware would normally do */ - stl_le_phys(as, 0xf1000000, 0x28020ff); - stl_le_phys(as, 0xf1000278, 0xa31fc); - stl_le_phys(as, 0xf100f300, 0x11ff0400); - stl_le_phys(as, 0xf100f10c, 0x80000000); - stl_le_phys(as, 0xf100001c, 0x8000000); - pegasos2_pci_config_write(as, 0, PCI_COMMAND, 2, PCI_COMMAND_IO | + pegasos2_mv_reg_write(pm, 0, 4, 0x28020ff); + pegasos2_mv_reg_write(pm, 0x278, 4, 0xa31fc); + pegasos2_mv_reg_write(pm, 0xf300, 4, 0x11ff0400); + pegasos2_mv_reg_write(pm, 0xf10c, 4, 0x80000000); + pegasos2_mv_reg_write(pm, 0x1c, 4, 0x8000000); + pegasos2_pci_config_write(pm, 0, PCI_COMMAND, 2, PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER); - pegasos2_pci_config_write(as, 1, PCI_COMMAND, 2, PCI_COMMAND_IO | + pegasos2_pci_config_write(pm, 1, PCI_COMMAND, 2, PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER); - pegasos2_pci_config_write(as, 1, (PCI_DEVFN(12, 0) << 8) | + pegasos2_pci_config_write(pm, 1, (PCI_DEVFN(12, 0) << 8) | PCI_INTERRUPT_LINE, 2, 0x9); - 
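pegasos2_pci_config_read()/write() above now route the firmware-style config cycles through the MV64361's own register block (via memory_region_dispatch_read/write) instead of raw physical stores, which is also why the mv64361.c hunk earlier adds sysbus_init_mmio() for that region. The config address written to the CF8-like port at PCI0_CFG_ADDR/PCI1_CFG_ADDR carries the devfn in bits [15:8] and the register offset in bits [7:0], with bit 31 as the enable bit. A worked example of the encoding used by the board-reset code in this hunk, for the ISA-bridge function at devfn (12, 0):

    #include <stdint.h>
    #include <stdio.h>

    #define PCI_DEVFN(slot, func)  ((((slot) & 0x1f) << 3) | ((func) & 0x07))
    #define PCI_INTERRUPT_LINE     0x3c

    int main(void)
    {
        uint32_t addr = (PCI_DEVFN(12, 0) << 8) | PCI_INTERRUPT_LINE;

        /* pegasos2_pci_config_write() ORs in BIT(31) before writing the
         * address to PCI1_CFG_ADDR (0xc78), then writes the data word at
         * the following register.
         */
        printf("cfg addr=0x%08x, with enable bit=0x%08x\n",
               addr, addr | (1u << 31));     /* 0x0000603c / 0x8000603c */
        return 0;
    }
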
pegasos2_pci_config_write(as, 1, (PCI_DEVFN(12, 0) << 8) | + pegasos2_pci_config_write(pm, 1, (PCI_DEVFN(12, 0) << 8) | 0x50, 1, 0x2); - pegasos2_pci_config_write(as, 1, (PCI_DEVFN(12, 1) << 8) | + pegasos2_pci_config_write(pm, 1, (PCI_DEVFN(12, 1) << 8) | PCI_INTERRUPT_LINE, 2, 0x109); - pegasos2_pci_config_write(as, 1, (PCI_DEVFN(12, 1) << 8) | + pegasos2_pci_config_write(pm, 1, (PCI_DEVFN(12, 1) << 8) | PCI_CLASS_PROG, 1, 0xf); - pegasos2_pci_config_write(as, 1, (PCI_DEVFN(12, 1) << 8) | + pegasos2_pci_config_write(pm, 1, (PCI_DEVFN(12, 1) << 8) | 0x40, 1, 0xb); - pegasos2_pci_config_write(as, 1, (PCI_DEVFN(12, 1) << 8) | + pegasos2_pci_config_write(pm, 1, (PCI_DEVFN(12, 1) << 8) | 0x50, 4, 0x17171717); - pegasos2_pci_config_write(as, 1, (PCI_DEVFN(12, 1) << 8) | + pegasos2_pci_config_write(pm, 1, (PCI_DEVFN(12, 1) << 8) | PCI_COMMAND, 2, 0x87); - pegasos2_pci_config_write(as, 1, (PCI_DEVFN(12, 2) << 8) | + pegasos2_pci_config_write(pm, 1, (PCI_DEVFN(12, 2) << 8) | PCI_INTERRUPT_LINE, 2, 0x409); - pegasos2_pci_config_write(as, 1, (PCI_DEVFN(12, 3) << 8) | + pegasos2_pci_config_write(pm, 1, (PCI_DEVFN(12, 3) << 8) | PCI_INTERRUPT_LINE, 2, 0x409); - pegasos2_pci_config_write(as, 1, (PCI_DEVFN(12, 4) << 8) | + pegasos2_pci_config_write(pm, 1, (PCI_DEVFN(12, 4) << 8) | PCI_INTERRUPT_LINE, 2, 0x9); - pegasos2_pci_config_write(as, 1, (PCI_DEVFN(12, 4) << 8) | + pegasos2_pci_config_write(pm, 1, (PCI_DEVFN(12, 4) << 8) | 0x48, 4, 0xf00); - pegasos2_pci_config_write(as, 1, (PCI_DEVFN(12, 4) << 8) | + pegasos2_pci_config_write(pm, 1, (PCI_DEVFN(12, 4) << 8) | 0x40, 4, 0x558020); - pegasos2_pci_config_write(as, 1, (PCI_DEVFN(12, 4) << 8) | + pegasos2_pci_config_write(pm, 1, (PCI_DEVFN(12, 4) << 8) | 0x90, 4, 0xd00); - pegasos2_pci_config_write(as, 1, (PCI_DEVFN(12, 5) << 8) | + pegasos2_pci_config_write(pm, 1, (PCI_DEVFN(12, 5) << 8) | PCI_INTERRUPT_LINE, 2, 0x309); - pegasos2_pci_config_write(as, 1, (PCI_DEVFN(12, 6) << 8) | + pegasos2_pci_config_write(pm, 1, (PCI_DEVFN(12, 6) << 8) | PCI_INTERRUPT_LINE, 2, 0x309); /* Device tree and VOF set up */ @@ -362,6 +368,29 @@ static target_ulong pegasos2_rtas(PowerPCCPU *cpu, Pegasos2MachineState *pm, return H_PARAMETER; } switch (token) { + case RTAS_GET_TIME_OF_DAY: + { + QObject *qo = object_property_get_qobject(qdev_get_machine(), + "rtc-time", &error_fatal); + QDict *qd = qobject_to(QDict, qo); + + if (nargs != 0 || nrets != 8 || !qd) { + stl_be_phys(as, rets, -1); + qobject_unref(qo); + return H_PARAMETER; + } + + stl_be_phys(as, rets, 0); + stl_be_phys(as, rets + 4, qdict_get_int(qd, "tm_year") + 1900); + stl_be_phys(as, rets + 8, qdict_get_int(qd, "tm_mon") + 1); + stl_be_phys(as, rets + 12, qdict_get_int(qd, "tm_mday")); + stl_be_phys(as, rets + 16, qdict_get_int(qd, "tm_hour")); + stl_be_phys(as, rets + 20, qdict_get_int(qd, "tm_min")); + stl_be_phys(as, rets + 24, qdict_get_int(qd, "tm_sec")); + stl_be_phys(as, rets + 28, 0); + qobject_unref(qo); + return H_SUCCESS; + } case RTAS_READ_PCI_CONFIG: { uint32_t addr, len, val; @@ -372,7 +401,7 @@ static target_ulong pegasos2_rtas(PowerPCCPU *cpu, Pegasos2MachineState *pm, } addr = ldl_be_phys(as, args); len = ldl_be_phys(as, args + 4); - val = pegasos2_pci_config_read(as, !(addr >> 24), + val = pegasos2_pci_config_read(pm, !(addr >> 24), addr & 0x0fffffff, len); stl_be_phys(as, rets, 0); stl_be_phys(as, rets + 4, val); @@ -389,7 +418,7 @@ static target_ulong pegasos2_rtas(PowerPCCPU *cpu, Pegasos2MachineState *pm, addr = ldl_be_phys(as, args); len = ldl_be_phys(as, args + 4); val = ldl_be_phys(as, 
args + 8); - pegasos2_pci_config_write(as, !(addr >> 24), + pegasos2_pci_config_write(pm, !(addr >> 24), addr & 0x0fffffff, len, val); stl_be_phys(as, rets, 0); return H_SUCCESS; @@ -402,6 +431,16 @@ static target_ulong pegasos2_rtas(PowerPCCPU *cpu, Pegasos2MachineState *pm, qemu_log_mask(LOG_UNIMP, "%c", ldl_be_phys(as, args)); stl_be_phys(as, rets, 0); return H_SUCCESS; + case RTAS_POWER_OFF: + { + if (nargs != 2 || nrets != 1) { + stl_be_phys(as, rets, -1); + return H_PARAMETER; + } + qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN); + stl_be_phys(as, rets, 0); + return H_SUCCESS; + } default: qemu_log_mask(LOG_UNIMP, "Unknown RTAS token %u (args=%u, rets=%u)\n", token, nargs, nrets); diff --git a/hw/ppc/pnv.c b/hw/ppc/pnv.c index 2f5358b70c..71e45515f1 100644 --- a/hw/ppc/pnv.c +++ b/hw/ppc/pnv.c @@ -723,6 +723,8 @@ static uint64_t pnv_chip_get_ram_size(PnvMachineState *pnv, int chip_id) return QEMU_ALIGN_DOWN(ram_per_chip, 1 * MiB); } + assert(pnv->num_chips > 1); + ram_per_chip = (machine->ram_size - 1 * GiB) / (pnv->num_chips - 1); return chip_id == 0 ? 1 * GiB : QEMU_ALIGN_DOWN(ram_per_chip, 1 * MiB); } @@ -838,8 +840,7 @@ static void pnv_init(MachineState *machine) for (i = 0; i < pnv->num_chips; i++) { char chip_name[32]; Object *chip = OBJECT(qdev_new(chip_typename)); - int chip_id = i; - uint64_t chip_ram_size = pnv_chip_get_ram_size(pnv, chip_id); + uint64_t chip_ram_size = pnv_chip_get_ram_size(pnv, i); pnv->chips[i] = PNV_CHIP(chip); @@ -850,9 +851,9 @@ static void pnv_init(MachineState *machine) &error_fatal); chip_ram_start += chip_ram_size; - snprintf(chip_name, sizeof(chip_name), "chip[%d]", chip_id); + snprintf(chip_name, sizeof(chip_name), "chip[%d]", i); object_property_add_child(OBJECT(pnv), chip_name, chip); - object_property_set_int(chip, "chip-id", chip_id, &error_fatal); + object_property_set_int(chip, "chip-id", i, &error_fatal); object_property_set_int(chip, "nr-cores", machine->smp.cores, &error_fatal); object_property_set_int(chip, "nr-threads", machine->smp.threads, @@ -1369,10 +1370,10 @@ static void pnv_chip_quad_realize(Pnv9Chip *chip9, Error **errp) sizeof(*eq), TYPE_PNV_QUAD, &error_fatal, NULL); - object_property_set_int(OBJECT(eq), "id", core_id, &error_fatal); + object_property_set_int(OBJECT(eq), "quad-id", core_id, &error_fatal); qdev_realize(DEVICE(eq), NULL, &error_fatal); - pnv_xscom_add_subregion(chip, PNV9_XSCOM_EQ_BASE(eq->id), + pnv_xscom_add_subregion(chip, PNV9_XSCOM_EQ_BASE(eq->quad_id), &eq->xscom_regs); } } diff --git a/hw/ppc/pnv_core.c b/hw/ppc/pnv_core.c index 4de8414df2..19e8eb885f 100644 --- a/hw/ppc/pnv_core.c +++ b/hw/ppc/pnv_core.c @@ -407,13 +407,13 @@ static void pnv_quad_realize(DeviceState *dev, Error **errp) PnvQuad *eq = PNV_QUAD(dev); char name[32]; - snprintf(name, sizeof(name), "xscom-quad.%d", eq->id); + snprintf(name, sizeof(name), "xscom-quad.%d", eq->quad_id); pnv_xscom_region_init(&eq->xscom_regs, OBJECT(dev), &pnv_quad_xscom_ops, eq, name, PNV9_XSCOM_EQ_SIZE); } static Property pnv_quad_properties[] = { - DEFINE_PROP_UINT32("id", PnvQuad, id, 0), + DEFINE_PROP_UINT32("quad-id", PnvQuad, quad_id, 0), DEFINE_PROP_END_OF_LIST(), }; diff --git a/hw/ppc/pnv_pnor.c b/hw/ppc/pnv_pnor.c index 5ef1cf2afb..83ecccca28 100644 --- a/hw/ppc/pnv_pnor.c +++ b/hw/ppc/pnv_pnor.c @@ -36,7 +36,7 @@ static void pnv_pnor_update(PnvPnor *s, int offset, int size) int offset_end; int ret; - if (s->blk) { + if (!s->blk || !blk_is_writable(s->blk)) { return; } diff --git a/hw/ppc/pnv_xscom.c b/hw/ppc/pnv_xscom.c index 
faa488e311..9ce018dbc2 100644 --- a/hw/ppc/pnv_xscom.c +++ b/hw/ppc/pnv_xscom.c @@ -284,6 +284,10 @@ int pnv_dt_xscom(PnvChip *chip, void *fdt, int root_offset, _FDT(xscom_offset); g_free(name); _FDT((fdt_setprop_cell(fdt, xscom_offset, "ibm,chip-id", chip->chip_id))); + /* + * On P10, the xscom bus id has been deprecated and the chip id is + * calculated from the "Primary topology table index". See skiboot. + */ _FDT((fdt_setprop_cell(fdt, xscom_offset, "ibm,primary-topology-index", chip->chip_id))); _FDT((fdt_setprop_cell(fdt, xscom_offset, "#address-cells", 1))); diff --git a/hw/ppc/ppc.c b/hw/ppc/ppc.c index 7375bf4fa9..e8127599c9 100644 --- a/hw/ppc/ppc.c +++ b/hw/ppc/ppc.c @@ -37,22 +37,6 @@ #include "migration/vmstate.h" #include "trace.h" -//#define PPC_DEBUG_IRQ -//#define PPC_DEBUG_TB - -#ifdef PPC_DEBUG_IRQ -# define LOG_IRQ(...) qemu_log_mask(CPU_LOG_INT, ## __VA_ARGS__) -#else -# define LOG_IRQ(...) do { } while (0) -#endif - - -#ifdef PPC_DEBUG_TB -# define LOG_TB(...) qemu_log(__VA_ARGS__) -#else -# define LOG_TB(...) do { } while (0) -#endif - static void cpu_ppc_tb_stop (CPUPPCState *env); static void cpu_ppc_tb_start (CPUPPCState *env); @@ -86,9 +70,8 @@ void ppc_set_irq(PowerPCCPU *cpu, int n_IRQ, int level) } - LOG_IRQ("%s: %p n_IRQ %d level %d => pending %08" PRIx32 - "req %08x\n", __func__, env, n_IRQ, level, - env->pending_interrupts, CPU(cpu)->interrupt_request); + trace_ppc_irq_set_exit(env, n_IRQ, level, env->pending_interrupts, + CPU(cpu)->interrupt_request); if (locked) { qemu_mutex_unlock_iothread(); @@ -102,8 +85,8 @@ static void ppc6xx_set_irq(void *opaque, int pin, int level) CPUPPCState *env = &cpu->env; int cur_level; - LOG_IRQ("%s: env %p pin %d level %d\n", __func__, - env, pin, level); + trace_ppc_irq_set(env, pin, level); + cur_level = (env->irq_input_state >> pin) & 1; /* Don't generate spurious events */ if ((cur_level == 1 && level == 0) || (cur_level == 0 && level != 0)) { @@ -112,8 +95,7 @@ static void ppc6xx_set_irq(void *opaque, int pin, int level) switch (pin) { case PPC6xx_INPUT_TBEN: /* Level sensitive - active high */ - LOG_IRQ("%s: %s the time base\n", - __func__, level ? 
"start" : "stop"); + trace_ppc_irq_set_state("time base", level); if (level) { cpu_ppc_tb_start(env); } else { @@ -122,14 +104,12 @@ static void ppc6xx_set_irq(void *opaque, int pin, int level) break; case PPC6xx_INPUT_INT: /* Level sensitive - active high */ - LOG_IRQ("%s: set the external IRQ state to %d\n", - __func__, level); + trace_ppc_irq_set_state("external IRQ", level); ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level); break; case PPC6xx_INPUT_SMI: /* Level sensitive - active high */ - LOG_IRQ("%s: set the SMI IRQ state to %d\n", - __func__, level); + trace_ppc_irq_set_state("SMI IRQ", level); ppc_set_irq(cpu, PPC_INTERRUPT_SMI, level); break; case PPC6xx_INPUT_MCP: @@ -138,8 +118,7 @@ static void ppc6xx_set_irq(void *opaque, int pin, int level) * 603/604/740/750: check HID0[EMCP] */ if (cur_level == 1 && level == 0) { - LOG_IRQ("%s: raise machine check state\n", - __func__); + trace_ppc_irq_set_state("machine check", 1); ppc_set_irq(cpu, PPC_INTERRUPT_MCK, 1); } break; @@ -148,26 +127,23 @@ static void ppc6xx_set_irq(void *opaque, int pin, int level) /* XXX: TODO: relay the signal to CKSTP_OUT pin */ /* XXX: Note that the only way to restart the CPU is to reset it */ if (level) { - LOG_IRQ("%s: stop the CPU\n", __func__); + trace_ppc_irq_cpu("stop"); cs->halted = 1; } break; case PPC6xx_INPUT_HRESET: /* Level sensitive - active low */ if (level) { - LOG_IRQ("%s: reset the CPU\n", __func__); + trace_ppc_irq_reset("CPU"); cpu_interrupt(cs, CPU_INTERRUPT_RESET); } break; case PPC6xx_INPUT_SRESET: - LOG_IRQ("%s: set the RESET IRQ state to %d\n", - __func__, level); + trace_ppc_irq_set_state("RESET IRQ", level); ppc_set_irq(cpu, PPC_INTERRUPT_RESET, level); break; default: - /* Unknown pin - do nothing */ - LOG_IRQ("%s: unknown IRQ pin %d\n", __func__, pin); - return; + g_assert_not_reached(); } if (level) env->irq_input_state |= 1 << pin; @@ -192,8 +168,8 @@ static void ppc970_set_irq(void *opaque, int pin, int level) CPUPPCState *env = &cpu->env; int cur_level; - LOG_IRQ("%s: env %p pin %d level %d\n", __func__, - env, pin, level); + trace_ppc_irq_set(env, pin, level); + cur_level = (env->irq_input_state >> pin) & 1; /* Don't generate spurious events */ if ((cur_level == 1 && level == 0) || (cur_level == 0 && level != 0)) { @@ -202,14 +178,12 @@ static void ppc970_set_irq(void *opaque, int pin, int level) switch (pin) { case PPC970_INPUT_INT: /* Level sensitive - active high */ - LOG_IRQ("%s: set the external IRQ state to %d\n", - __func__, level); + trace_ppc_irq_set_state("external IRQ", level); ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level); break; case PPC970_INPUT_THINT: /* Level sensitive - active high */ - LOG_IRQ("%s: set the SMI IRQ state to %d\n", __func__, - level); + trace_ppc_irq_set_state("SMI IRQ", level); ppc_set_irq(cpu, PPC_INTERRUPT_THERM, level); break; case PPC970_INPUT_MCP: @@ -218,8 +192,7 @@ static void ppc970_set_irq(void *opaque, int pin, int level) * 603/604/740/750: check HID0[EMCP] */ if (cur_level == 1 && level == 0) { - LOG_IRQ("%s: raise machine check state\n", - __func__); + trace_ppc_irq_set_state("machine check", 1); ppc_set_irq(cpu, PPC_INTERRUPT_MCK, 1); } break; @@ -227,10 +200,10 @@ static void ppc970_set_irq(void *opaque, int pin, int level) /* Level sensitive - active low */ /* XXX: TODO: relay the signal to CKSTP_OUT pin */ if (level) { - LOG_IRQ("%s: stop the CPU\n", __func__); + trace_ppc_irq_cpu("stop"); cs->halted = 1; } else { - LOG_IRQ("%s: restart the CPU\n", __func__); + trace_ppc_irq_cpu("restart"); cs->halted = 0; qemu_cpu_kick(cs); } @@ 
-242,19 +215,15 @@ static void ppc970_set_irq(void *opaque, int pin, int level) } break; case PPC970_INPUT_SRESET: - LOG_IRQ("%s: set the RESET IRQ state to %d\n", - __func__, level); + trace_ppc_irq_set_state("RESET IRQ", level); ppc_set_irq(cpu, PPC_INTERRUPT_RESET, level); break; case PPC970_INPUT_TBEN: - LOG_IRQ("%s: set the TBEN state to %d\n", __func__, - level); + trace_ppc_irq_set_state("TBEN IRQ", level); /* XXX: TODO */ break; default: - /* Unknown pin - do nothing */ - LOG_IRQ("%s: unknown IRQ pin %d\n", __func__, pin); - return; + g_assert_not_reached(); } if (level) env->irq_input_state |= 1 << pin; @@ -276,20 +245,16 @@ static void power7_set_irq(void *opaque, int pin, int level) { PowerPCCPU *cpu = opaque; - LOG_IRQ("%s: env %p pin %d level %d\n", __func__, - &cpu->env, pin, level); + trace_ppc_irq_set(&cpu->env, pin, level); switch (pin) { case POWER7_INPUT_INT: /* Level sensitive - active high */ - LOG_IRQ("%s: set the external IRQ state to %d\n", - __func__, level); + trace_ppc_irq_set_state("external IRQ", level); ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level); break; default: - /* Unknown pin - do nothing */ - LOG_IRQ("%s: unknown IRQ pin %d\n", __func__, pin); - return; + g_assert_not_reached(); } } @@ -306,25 +271,21 @@ static void power9_set_irq(void *opaque, int pin, int level) { PowerPCCPU *cpu = opaque; - LOG_IRQ("%s: env %p pin %d level %d\n", __func__, - &cpu->env, pin, level); + trace_ppc_irq_set(&cpu->env, pin, level); switch (pin) { case POWER9_INPUT_INT: /* Level sensitive - active high */ - LOG_IRQ("%s: set the external IRQ state to %d\n", - __func__, level); + trace_ppc_irq_set_state("external IRQ", level); ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level); break; case POWER9_INPUT_HINT: /* Level sensitive - active high */ - LOG_IRQ("%s: set the external IRQ state to %d\n", - __func__, level); + trace_ppc_irq_set_state("HV external IRQ", level); ppc_set_irq(cpu, PPC_INTERRUPT_HVIRT, level); break; default: - /* Unknown pin - do nothing */ - LOG_IRQ("%s: unknown IRQ pin %d\n", __func__, pin); + g_assert_not_reached(); return; } } @@ -375,6 +336,8 @@ void store_40x_dbcr0(CPUPPCState *env, uint32_t val) { PowerPCCPU *cpu = env_archcpu(env); + qemu_mutex_lock_iothread(); + switch ((val >> 28) & 0x3) { case 0x0: /* No action */ @@ -392,6 +355,8 @@ void store_40x_dbcr0(CPUPPCState *env, uint32_t val) ppc40x_system_reset(cpu); break; } + + qemu_mutex_unlock_iothread(); } /* PowerPC 40x internal IRQ controller */ @@ -401,8 +366,8 @@ static void ppc40x_set_irq(void *opaque, int pin, int level) CPUPPCState *env = &cpu->env; int cur_level; - LOG_IRQ("%s: env %p pin %d level %d\n", __func__, - env, pin, level); + trace_ppc_irq_set(env, pin, level); + cur_level = (env->irq_input_state >> pin) & 1; /* Don't generate spurious events */ if ((cur_level == 1 && level == 0) || (cur_level == 0 && level != 0)) { @@ -411,57 +376,51 @@ static void ppc40x_set_irq(void *opaque, int pin, int level) switch (pin) { case PPC40x_INPUT_RESET_SYS: if (level) { - LOG_IRQ("%s: reset the PowerPC system\n", - __func__); + trace_ppc_irq_reset("system"); ppc40x_system_reset(cpu); } break; case PPC40x_INPUT_RESET_CHIP: if (level) { - LOG_IRQ("%s: reset the PowerPC chip\n", __func__); + trace_ppc_irq_reset("chip"); ppc40x_chip_reset(cpu); } break; case PPC40x_INPUT_RESET_CORE: /* XXX: TODO: update DBSR[MRR] */ if (level) { - LOG_IRQ("%s: reset the PowerPC core\n", __func__); + trace_ppc_irq_reset("core"); ppc40x_core_reset(cpu); } break; case PPC40x_INPUT_CINT: /* Level sensitive - active high */ - 
LOG_IRQ("%s: set the critical IRQ state to %d\n", - __func__, level); + trace_ppc_irq_set_state("critical IRQ", level); ppc_set_irq(cpu, PPC_INTERRUPT_CEXT, level); break; case PPC40x_INPUT_INT: /* Level sensitive - active high */ - LOG_IRQ("%s: set the external IRQ state to %d\n", - __func__, level); + trace_ppc_irq_set_state("external IRQ", level); ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level); break; case PPC40x_INPUT_HALT: /* Level sensitive - active low */ if (level) { - LOG_IRQ("%s: stop the CPU\n", __func__); + trace_ppc_irq_cpu("stop"); cs->halted = 1; } else { - LOG_IRQ("%s: restart the CPU\n", __func__); + trace_ppc_irq_cpu("restart"); cs->halted = 0; qemu_cpu_kick(cs); } break; case PPC40x_INPUT_DEBUG: /* Level sensitive - active high */ - LOG_IRQ("%s: set the debug pin state to %d\n", - __func__, level); + trace_ppc_irq_set_state("debug pin", level); ppc_set_irq(cpu, PPC_INTERRUPT_DEBUG, level); break; default: - /* Unknown pin - do nothing */ - LOG_IRQ("%s: unknown IRQ pin %d\n", __func__, pin); - return; + g_assert_not_reached(); } if (level) env->irq_input_state |= 1 << pin; @@ -485,47 +444,41 @@ static void ppce500_set_irq(void *opaque, int pin, int level) CPUPPCState *env = &cpu->env; int cur_level; - LOG_IRQ("%s: env %p pin %d level %d\n", __func__, - env, pin, level); + trace_ppc_irq_set(env, pin, level); + cur_level = (env->irq_input_state >> pin) & 1; /* Don't generate spurious events */ if ((cur_level == 1 && level == 0) || (cur_level == 0 && level != 0)) { switch (pin) { case PPCE500_INPUT_MCK: if (level) { - LOG_IRQ("%s: reset the PowerPC system\n", - __func__); + trace_ppc_irq_reset("system"); qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET); } break; case PPCE500_INPUT_RESET_CORE: if (level) { - LOG_IRQ("%s: reset the PowerPC core\n", __func__); + trace_ppc_irq_reset("core"); ppc_set_irq(cpu, PPC_INTERRUPT_MCK, level); } break; case PPCE500_INPUT_CINT: /* Level sensitive - active high */ - LOG_IRQ("%s: set the critical IRQ state to %d\n", - __func__, level); + trace_ppc_irq_set_state("critical IRQ", level); ppc_set_irq(cpu, PPC_INTERRUPT_CEXT, level); break; case PPCE500_INPUT_INT: /* Level sensitive - active high */ - LOG_IRQ("%s: set the core IRQ state to %d\n", - __func__, level); + trace_ppc_irq_set_state("core IRQ", level); ppc_set_irq(cpu, PPC_INTERRUPT_EXT, level); break; case PPCE500_INPUT_DEBUG: /* Level sensitive - active high */ - LOG_IRQ("%s: set the debug pin state to %d\n", - __func__, level); + trace_ppc_irq_set_state("debug pin", level); ppc_set_irq(cpu, PPC_INTERRUPT_DEBUG, level); break; default: - /* Unknown pin - do nothing */ - LOG_IRQ("%s: unknown IRQ pin %d\n", __func__, pin); - return; + g_assert_not_reached(); } if (level) env->irq_input_state |= 1 << pin; @@ -576,7 +529,7 @@ uint64_t cpu_ppc_load_tbl (CPUPPCState *env) } tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->tb_offset); - LOG_TB("%s: tb %016" PRIx64 "\n", __func__, tb); + trace_ppc_tb_load(tb); return tb; } @@ -587,7 +540,7 @@ static inline uint32_t _cpu_ppc_load_tbu(CPUPPCState *env) uint64_t tb; tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->tb_offset); - LOG_TB("%s: tb %016" PRIx64 "\n", __func__, tb); + trace_ppc_tb_load(tb); return tb >> 32; } @@ -607,8 +560,7 @@ static inline void cpu_ppc_store_tb(ppc_tb_t *tb_env, uint64_t vmclk, *tb_offsetp = value - muldiv64(vmclk, tb_env->tb_freq, NANOSECONDS_PER_SECOND); - LOG_TB("%s: tb %016" PRIx64 " offset %08" PRIx64 "\n", - __func__, value, *tb_offsetp); + 
trace_ppc_tb_store(value, *tb_offsetp); } void cpu_ppc_store_tbl (CPUPPCState *env, uint32_t value) @@ -644,7 +596,7 @@ uint64_t cpu_ppc_load_atbl (CPUPPCState *env) uint64_t tb; tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->atb_offset); - LOG_TB("%s: tb %016" PRIx64 "\n", __func__, tb); + trace_ppc_tb_load(tb); return tb; } @@ -655,7 +607,7 @@ uint32_t cpu_ppc_load_atbu (CPUPPCState *env) uint64_t tb; tb = cpu_ppc_get_tb(tb_env, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), tb_env->atb_offset); - LOG_TB("%s: tb %016" PRIx64 "\n", __func__, tb); + trace_ppc_tb_load(tb); return tb >> 32; } @@ -774,7 +726,7 @@ static inline int64_t _cpu_ppc_load_decr(CPUPPCState *env, uint64_t next) } else { decr = -muldiv64(-diff, tb_env->decr_freq, NANOSECONDS_PER_SECOND); } - LOG_TB("%s: %016" PRIx64 "\n", __func__, decr); + trace_ppc_decr_load(decr); return decr; } @@ -833,7 +785,7 @@ uint64_t cpu_ppc_load_purr (CPUPPCState *env) static inline void cpu_ppc_decr_excp(PowerPCCPU *cpu) { /* Raise it */ - LOG_TB("raise decrementer exception\n"); + trace_ppc_decr_excp("raise"); ppc_set_irq(cpu, PPC_INTERRUPT_DECR, 1); } @@ -847,7 +799,7 @@ static inline void cpu_ppc_hdecr_excp(PowerPCCPU *cpu) CPUPPCState *env = &cpu->env; /* Raise it */ - LOG_TB("raise hv decrementer exception\n"); + trace_ppc_decr_excp("raise HV"); /* The architecture specifies that we don't deliver HDEC * interrupts in a PM state. Not only they don't cause a @@ -873,17 +825,14 @@ static void __cpu_ppc_store_decr(PowerPCCPU *cpu, uint64_t *nextp, CPUPPCState *env = &cpu->env; ppc_tb_t *tb_env = env->tb_env; uint64_t now, next; - bool negative; + int64_t signed_value; + int64_t signed_decr; /* Truncate value to decr_width and sign extend for simplicity */ - value &= ((1ULL << nr_bits) - 1); - negative = !!(value & (1ULL << (nr_bits - 1))); - if (negative) { - value |= (0xFFFFFFFFULL << nr_bits); - } + signed_value = sextract64(value, 0, nr_bits); + signed_decr = sextract64(decr, 0, nr_bits); - LOG_TB("%s: " TARGET_FMT_lx " => " TARGET_FMT_lx "\n", __func__, - decr, value); + trace_ppc_decr_store(nr_bits, decr, value); if (kvm_enabled()) { /* KVM handles decrementer exceptions, we don't need our own timer */ @@ -904,15 +853,15 @@ static void __cpu_ppc_store_decr(PowerPCCPU *cpu, uint64_t *nextp, * an edge interrupt, so raise it here too. 
*/ if ((value < 3) || - ((tb_env->flags & PPC_DECR_UNDERFLOW_LEVEL) && negative) || - ((tb_env->flags & PPC_DECR_UNDERFLOW_TRIGGERED) && negative - && !(decr & (1ULL << (nr_bits - 1))))) { + ((tb_env->flags & PPC_DECR_UNDERFLOW_LEVEL) && signed_value < 0) || + ((tb_env->flags & PPC_DECR_UNDERFLOW_TRIGGERED) && signed_value < 0 + && signed_decr >= 0)) { (*raise_excp)(cpu); return; } /* On MSB level based systems a 0 for the MSB stops interrupt delivery */ - if (!negative && (tb_env->flags & PPC_DECR_UNDERFLOW_LEVEL)) { + if (signed_value >= 0 && (tb_env->flags & PPC_DECR_UNDERFLOW_LEVEL)) { (*lower_excp)(cpu); } @@ -1211,9 +1160,8 @@ static void cpu_4xx_fit_cb (void *opaque) if ((env->spr[SPR_40x_TCR] >> 23) & 0x1) { ppc_set_irq(cpu, PPC_INTERRUPT_FIT, 1); } - LOG_TB("%s: ir %d TCR " TARGET_FMT_lx " TSR " TARGET_FMT_lx "\n", __func__, - (int)((env->spr[SPR_40x_TCR] >> 23) & 0x1), - env->spr[SPR_40x_TCR], env->spr[SPR_40x_TSR]); + trace_ppc4xx_fit((int)((env->spr[SPR_40x_TCR] >> 23) & 0x1), + env->spr[SPR_40x_TCR], env->spr[SPR_40x_TSR]); } /* Programmable interval timer */ @@ -1227,11 +1175,10 @@ static void start_stop_pit (CPUPPCState *env, ppc_tb_t *tb_env, int is_excp) !((env->spr[SPR_40x_TCR] >> 26) & 0x1) || (is_excp && !((env->spr[SPR_40x_TCR] >> 22) & 0x1))) { /* Stop PIT */ - LOG_TB("%s: stop PIT\n", __func__); + trace_ppc4xx_pit_stop(); timer_del(tb_env->decr_timer); } else { - LOG_TB("%s: start PIT %016" PRIx64 "\n", - __func__, ppc40x_timer->pit_reload); + trace_ppc4xx_pit_start(ppc40x_timer->pit_reload); now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); next = now + muldiv64(ppc40x_timer->pit_reload, NANOSECONDS_PER_SECOND, tb_env->decr_freq); @@ -1260,9 +1207,7 @@ static void cpu_4xx_pit_cb (void *opaque) ppc_set_irq(cpu, ppc40x_timer->decr_excp, 1); } start_stop_pit(env, tb_env, 1); - LOG_TB("%s: ar %d ir %d TCR " TARGET_FMT_lx " TSR " TARGET_FMT_lx " " - "%016" PRIx64 "\n", __func__, - (int)((env->spr[SPR_40x_TCR] >> 22) & 0x1), + trace_ppc4xx_pit((int)((env->spr[SPR_40x_TCR] >> 22) & 0x1), (int)((env->spr[SPR_40x_TCR] >> 26) & 0x1), env->spr[SPR_40x_TCR], env->spr[SPR_40x_TSR], ppc40x_timer->pit_reload); @@ -1302,8 +1247,7 @@ static void cpu_4xx_wdt_cb (void *opaque) next = now + muldiv64(next, NANOSECONDS_PER_SECOND, tb_env->decr_freq); if (next == now) next++; - LOG_TB("%s: TCR " TARGET_FMT_lx " TSR " TARGET_FMT_lx "\n", __func__, - env->spr[SPR_40x_TCR], env->spr[SPR_40x_TSR]); + trace_ppc4xx_wdt(env->spr[SPR_40x_TCR], env->spr[SPR_40x_TSR]); switch ((env->spr[SPR_40x_TSR] >> 30) & 0x3) { case 0x0: case 0x1: @@ -1346,7 +1290,7 @@ void store_40x_pit (CPUPPCState *env, target_ulong val) tb_env = env->tb_env; ppc40x_timer = tb_env->opaque; - LOG_TB("%s val" TARGET_FMT_lx "\n", __func__, val); + trace_ppc40x_store_pit(val); ppc40x_timer->pit_reload = val; start_stop_pit(env, tb_env, 0); } @@ -1361,8 +1305,7 @@ static void ppc_40x_set_tb_clk (void *opaque, uint32_t freq) CPUPPCState *env = opaque; ppc_tb_t *tb_env = env->tb_env; - LOG_TB("%s set new frequency to %" PRIu32 "\n", __func__, - freq); + trace_ppc40x_set_tb_clk(freq); tb_env->tb_freq = freq; tb_env->decr_freq = freq; /* XXX: we should also update all timers */ @@ -1381,7 +1324,7 @@ clk_setup_cb ppc_40x_timers_init (CPUPPCState *env, uint32_t freq, tb_env->tb_freq = freq; tb_env->decr_freq = freq; tb_env->opaque = ppc40x_timer; - LOG_TB("%s freq %" PRIu32 "\n", __func__, freq); + trace_ppc40x_timers_init(freq); if (ppc40x_timer != NULL) { /* We use decr timer for PIT */ tb_env->decr_timer = 
timer_new_ns(QEMU_CLOCK_VIRTUAL, &cpu_4xx_pit_cb, env); diff --git a/hw/ppc/ppc4xx_pci.c b/hw/ppc/ppc4xx_pci.c index 8147ba6f94..304a29349c 100644 --- a/hw/ppc/ppc4xx_pci.c +++ b/hw/ppc/ppc4xx_pci.c @@ -48,12 +48,14 @@ OBJECT_DECLARE_SIMPLE_TYPE(PPC4xxPCIState, PPC4xx_PCI_HOST_BRIDGE) #define PPC4xx_PCI_NR_PMMS 3 #define PPC4xx_PCI_NR_PTMS 2 +#define PPC4xx_PCI_NUM_DEVS 5 + struct PPC4xxPCIState { PCIHostState parent_obj; struct PCIMasterMap pmm[PPC4xx_PCI_NR_PMMS]; struct PCITargetMap ptm[PPC4xx_PCI_NR_PTMS]; - qemu_irq irq[PCI_NUM_PINS]; + qemu_irq irq[PPC4xx_PCI_NUM_DEVS]; MemoryRegion container; MemoryRegion iomem; @@ -246,7 +248,7 @@ static int ppc4xx_pci_map_irq(PCIDevice *pci_dev, int irq_num) trace_ppc4xx_pci_map_irq(pci_dev->devfn, irq_num, slot); - return slot - 1; + return slot > 0 ? slot - 1 : PPC4xx_PCI_NUM_DEVS - 1; } static void ppc4xx_pci_set_irq(void *opaque, int irq_num, int level) @@ -254,7 +256,7 @@ static void ppc4xx_pci_set_irq(void *opaque, int irq_num, int level) qemu_irq *pci_irqs = opaque; trace_ppc4xx_pci_set_irq(irq_num); - assert(irq_num >= 0); + assert(irq_num >= 0 && irq_num < PPC4xx_PCI_NUM_DEVS); qemu_set_irq(pci_irqs[irq_num], level); } diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c index d39fd4e644..163c90388a 100644 --- a/hw/ppc/spapr.c +++ b/hw/ppc/spapr.c @@ -29,6 +29,7 @@ #include "qemu/datadir.h" #include "qapi/error.h" #include "qapi/qapi-events-machine.h" +#include "qapi/qapi-events-qdev.h" #include "qapi/visitor.h" #include "sysemu/sysemu.h" #include "sysemu/hostmem.h" @@ -2752,6 +2753,11 @@ static void spapr_machine_init(MachineState *machine) spapr_ovec_set(spapr->ov5, OV5_FORM1_AFFINITY); + /* Do not advertise FORM2 NUMA support for pseries-6.1 and older */ + if (!smc->pre_6_2_numa_affinity) { + spapr_ovec_set(spapr->ov5, OV5_FORM2_AFFINITY); + } + /* advertise support for dedicated HP event source to guests */ if (spapr->use_hotplug_event_source) { spapr_ovec_set(spapr->ov5, OV5_HP_EVT); @@ -2773,39 +2779,6 @@ static void spapr_machine_init(MachineState *machine) /* init CPUs */ spapr_init_cpus(spapr); - /* - * check we don't have a memory-less/cpu-less NUMA node - * Firmware relies on the existing memory/cpu topology to provide the - * NUMA topology to the kernel. - * And the linux kernel needs to know the NUMA topology at start - * to be able to hotplug CPUs later. - */ - if (machine->numa_state->num_nodes) { - for (i = 0; i < machine->numa_state->num_nodes; ++i) { - /* check for memory-less node */ - if (machine->numa_state->nodes[i].node_mem == 0) { - CPUState *cs; - int found = 0; - /* check for cpu-less node */ - CPU_FOREACH(cs) { - PowerPCCPU *cpu = POWERPC_CPU(cs); - if (cpu->node_id == i) { - found = 1; - break; - } - } - /* memory-less and cpu-less node */ - if (!found) { - error_report( - "Memory-less/cpu-less nodes are not supported (node %d)", - i); - exit(1); - } - } - } - - } - spapr->gpu_numa_id = spapr_numa_initial_nvgpu_numa_id(machine); /* Init numa_assoc_array */ @@ -3686,11 +3659,18 @@ void spapr_memory_unplug_rollback(SpaprMachineState *spapr, DeviceState *dev) /* * Tell QAPI that something happened and the memory - * hotunplug wasn't successful. + * hotunplug wasn't successful. Keep sending + * MEM_UNPLUG_ERROR even while sending + * DEVICE_UNPLUG_GUEST_ERROR until the deprecation of + * MEM_UNPLUG_ERROR is due. */ qapi_error = g_strdup_printf("Memory hotunplug rejected by the guest " "for device %s", dev->id); - qapi_event_send_mem_unplug_error(dev->id, qapi_error); + + qapi_event_send_mem_unplug_error(dev->id ? 
: "", qapi_error); + + qapi_event_send_device_unplug_guest_error(!!dev->id, dev->id, + dev->canonical_path); } /* Callback to be called during DRC release. */ @@ -4700,8 +4680,12 @@ DEFINE_SPAPR_MACHINE(6_2, "6.2", true); */ static void spapr_machine_6_1_class_options(MachineClass *mc) { + SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc); + spapr_machine_6_2_class_options(mc); compat_props_add(mc->compat_props, hw_compat_6_1, hw_compat_6_1_len); + smc->pre_6_2_numa_affinity = true; + mc->smp_props.prefer_sockets = true; } DEFINE_SPAPR_MACHINE(6_1, "6.1", false); diff --git a/hw/ppc/spapr_cpu_core.c b/hw/ppc/spapr_cpu_core.c index 4f316a6f9d..58e7341cb7 100644 --- a/hw/ppc/spapr_cpu_core.c +++ b/hw/ppc/spapr_cpu_core.c @@ -382,6 +382,7 @@ static const TypeInfo spapr_cpu_core_type_infos[] = { DEFINE_SPAPR_CPU_CORE_TYPE("power9_v1.0"), DEFINE_SPAPR_CPU_CORE_TYPE("power9_v2.0"), DEFINE_SPAPR_CPU_CORE_TYPE("power10_v1.0"), + DEFINE_SPAPR_CPU_CORE_TYPE("power10_v2.0"), #ifdef CONFIG_KVM DEFINE_SPAPR_CPU_CORE_TYPE("host"), #endif diff --git a/hw/ppc/spapr_drc.c b/hw/ppc/spapr_drc.c index a2f2634601..f8ac0a10df 100644 --- a/hw/ppc/spapr_drc.c +++ b/hw/ppc/spapr_drc.c @@ -17,6 +17,8 @@ #include "hw/ppc/spapr_drc.h" #include "qom/object.h" #include "migration/vmstate.h" +#include "qapi/error.h" +#include "qapi/qapi-events-qdev.h" #include "qapi/visitor.h" #include "qemu/error-report.h" #include "hw/ppc/spapr.h" /* for RTAS return codes */ @@ -167,13 +169,15 @@ static uint32_t drc_unisolate_logical(SpaprDrc *drc) } drc->unplug_requested = false; - error_report("Device hotunplug rejected by the guest " - "for device %s", drc->dev->id); - /* - * TODO: send a QAPI DEVICE_UNPLUG_ERROR event when - * it is implemented. - */ + if (drc->dev->id) { + error_report("Device hotunplug rejected by the guest " + "for device %s", drc->dev->id); + } + + qapi_event_send_device_unplug_guest_error(!!drc->dev->id, + drc->dev->id, + drc->dev->canonical_path); } return RTAS_OUT_SUCCESS; /* Nothing to do */ diff --git a/hw/ppc/spapr_hcall.c b/hw/ppc/spapr_hcall.c index 0e9a5b2e40..222c1b6bbd 100644 --- a/hw/ppc/spapr_hcall.c +++ b/hw/ppc/spapr_hcall.c @@ -17,6 +17,7 @@ #include "kvm_ppc.h" #include "hw/ppc/fdt.h" #include "hw/ppc/spapr_ovec.h" +#include "hw/ppc/spapr_numa.h" #include "mmu-book3s-v3.h" #include "hw/mem/memory-device.h" @@ -1197,6 +1198,12 @@ target_ulong do_client_architecture_support(PowerPCCPU *cpu, spapr->cas_pre_isa3_guest = !spapr_ovec_test(ov1_guest, OV1_PPC_3_00); spapr_ovec_cleanup(ov1_guest); + /* + * Check for NUMA affinity conditions now that we know which NUMA + * affinity the guest will use. + */ + spapr_numa_associativity_check(spapr); + /* * Ensure the guest asks for an interrupt mode we support; * otherwise terminate the boot. diff --git a/hw/ppc/spapr_numa.c b/hw/ppc/spapr_numa.c index 779f18b994..e9ef7e7646 100644 --- a/hw/ppc/spapr_numa.c +++ b/hw/ppc/spapr_numa.c @@ -19,25 +19,88 @@ /* Moved from hw/ppc/spapr_pci_nvlink2.c */ #define SPAPR_GPU_NUMA_ID (cpu_to_be32(1)) -static bool spapr_machine_using_legacy_numa(SpaprMachineState *spapr) +/* + * Retrieves max_dist_ref_points of the current NUMA affinity. 
+ */ +static int get_max_dist_ref_points(SpaprMachineState *spapr) { - MachineState *machine = MACHINE(spapr); - SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(machine); + if (spapr_ovec_test(spapr->ov5_cas, OV5_FORM2_AFFINITY)) { + return FORM2_DIST_REF_POINTS; + } - return smc->pre_5_2_numa_associativity || - machine->numa_state->num_nodes <= 1; + return FORM1_DIST_REF_POINTS; +} + +/* + * Retrieves numa_assoc_size of the current NUMA affinity. + */ +static int get_numa_assoc_size(SpaprMachineState *spapr) +{ + if (spapr_ovec_test(spapr->ov5_cas, OV5_FORM2_AFFINITY)) { + return FORM2_NUMA_ASSOC_SIZE; + } + + return FORM1_NUMA_ASSOC_SIZE; +} + +/* + * Retrieves vcpu_assoc_size of the current NUMA affinity. + * + * vcpu_assoc_size is the size of ibm,associativity array + * for CPUs, which has an extra element (vcpu_id) in the end. + */ +static int get_vcpu_assoc_size(SpaprMachineState *spapr) +{ + return get_numa_assoc_size(spapr) + 1; +} + +/* + * Retrieves the ibm,associativity array of NUMA node 'node_id' + * for the current NUMA affinity. + */ +static const uint32_t *get_associativity(SpaprMachineState *spapr, int node_id) +{ + if (spapr_ovec_test(spapr->ov5_cas, OV5_FORM2_AFFINITY)) { + return spapr->FORM2_assoc_array[node_id]; + } + return spapr->FORM1_assoc_array[node_id]; +} + +/* + * Wrapper that returns node distance from ms->numa_state->nodes + * after handling edge cases where the distance might be absent. + */ +static int get_numa_distance(MachineState *ms, int src, int dst) +{ + NodeInfo *numa_info = ms->numa_state->nodes; + int ret = numa_info[src].distance[dst]; + + if (ret != 0) { + return ret; + } + + /* + * In case QEMU adds a default NUMA single node when the user + * did not add any, or where the user did not supply distances, + * the distance will be absent (zero). Return local/remote + * distance in this case. + */ + if (src == dst) { + return NUMA_DISTANCE_MIN; + } + + return NUMA_DISTANCE_DEFAULT; } static bool spapr_numa_is_symmetrical(MachineState *ms) { - int src, dst; int nb_numa_nodes = ms->numa_state->num_nodes; - NodeInfo *numa_info = ms->numa_state->nodes; + int src, dst; for (src = 0; src < nb_numa_nodes; src++) { for (dst = src; dst < nb_numa_nodes; dst++) { - if (numa_info[src].distance[dst] != - numa_info[dst].distance[src]) { + if (get_numa_distance(ms, src, dst) != + get_numa_distance(ms, dst, src)) { return false; } } @@ -92,12 +155,22 @@ static uint8_t spapr_numa_get_numa_level(uint8_t distance) return 0; } -static void spapr_numa_define_associativity_domains(SpaprMachineState *spapr) +static void spapr_numa_define_FORM1_domains(SpaprMachineState *spapr) { MachineState *ms = MACHINE(spapr); - NodeInfo *numa_info = ms->numa_state->nodes; int nb_numa_nodes = ms->numa_state->num_nodes; - int src, dst, i; + int src, dst, i, j; + + /* + * Fill all associativity domains of non-zero NUMA nodes with + * node_id. This is required because the default value (0) is + * considered a match with associativity domains of node 0. + */ + for (i = 1; i < nb_numa_nodes; i++) { + for (j = 1; j < FORM1_DIST_REF_POINTS; j++) { + spapr->FORM1_assoc_array[i][j] = cpu_to_be32(i); + } + } for (src = 0; src < nb_numa_nodes; src++) { for (dst = src; dst < nb_numa_nodes; dst++) { @@ -121,7 +194,7 @@ static void spapr_numa_define_associativity_domains(SpaprMachineState *spapr) * The PPC kernel expects the associativity domains of node 0 to * be always 0, and this algorithm will grant that by default. 
*/ - uint8_t distance = numa_info[src].distance[dst]; + uint8_t distance = get_numa_distance(ms, src, dst); uint8_t n_level = spapr_numa_get_numa_level(distance); uint32_t assoc_src; @@ -132,7 +205,7 @@ static void spapr_numa_define_associativity_domains(SpaprMachineState *spapr) * * The Linux kernel will assume that the distance between src and * dst, in this case of no match, is 10 (local distance) doubled - * for each NUMA it didn't match. We have MAX_DISTANCE_REF_POINTS + * for each NUMA it didn't match. We have FORM1_DIST_REF_POINTS * levels (4), so this gives us 10*2*2*2*2 = 160. * * This logic can be seen in the Linux kernel source code, as of @@ -147,25 +220,69 @@ static void spapr_numa_define_associativity_domains(SpaprMachineState *spapr) * and going up to 0x1. */ for (i = n_level; i > 0; i--) { - assoc_src = spapr->numa_assoc_array[src][i]; - spapr->numa_assoc_array[dst][i] = assoc_src; + assoc_src = spapr->FORM1_assoc_array[src][i]; + spapr->FORM1_assoc_array[dst][i] = assoc_src; } } } } -void spapr_numa_associativity_init(SpaprMachineState *spapr, - MachineState *machine) +static void spapr_numa_FORM1_affinity_check(MachineState *machine) +{ + int i; + + /* + * Check we don't have a memory-less/cpu-less NUMA node + * Firmware relies on the existing memory/cpu topology to provide the + * NUMA topology to the kernel. + * And the linux kernel needs to know the NUMA topology at start + * to be able to hotplug CPUs later. + */ + if (machine->numa_state->num_nodes) { + for (i = 0; i < machine->numa_state->num_nodes; ++i) { + /* check for memory-less node */ + if (machine->numa_state->nodes[i].node_mem == 0) { + CPUState *cs; + int found = 0; + /* check for cpu-less node */ + CPU_FOREACH(cs) { + PowerPCCPU *cpu = POWERPC_CPU(cs); + if (cpu->node_id == i) { + found = 1; + break; + } + } + /* memory-less and cpu-less node */ + if (!found) { + error_report( +"Memory-less/cpu-less nodes are not supported with FORM1 NUMA (node %d)", i); + exit(EXIT_FAILURE); + } + } + } + } + + if (!spapr_numa_is_symmetrical(machine)) { + error_report( +"Asymmetrical NUMA topologies aren't supported in the pSeries machine using FORM1 NUMA"); + exit(EXIT_FAILURE); + } +} + +/* + * Set NUMA machine state data based on FORM1 affinity semantics. + */ +static void spapr_numa_FORM1_affinity_init(SpaprMachineState *spapr, + MachineState *machine) { SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr); int nb_numa_nodes = machine->numa_state->num_nodes; int i, j, max_nodes_with_gpus; - bool using_legacy_numa = spapr_machine_using_legacy_numa(spapr); /* * For all associativity arrays: first position is the size, - * position MAX_DISTANCE_REF_POINTS is always the numa_id, + * position FORM1_DIST_REF_POINTS is always the numa_id, * represented by the index 'i'. * * This will break on sparse NUMA setups, when/if QEMU starts @@ -173,19 +290,8 @@ void spapr_numa_associativity_init(SpaprMachineState *spapr, * 'i' will be a valid node_id set by the user. */ for (i = 0; i < nb_numa_nodes; i++) { - spapr->numa_assoc_array[i][0] = cpu_to_be32(MAX_DISTANCE_REF_POINTS); - spapr->numa_assoc_array[i][MAX_DISTANCE_REF_POINTS] = cpu_to_be32(i); - - /* - * Fill all associativity domains of non-zero NUMA nodes with - * node_id. This is required because the default value (0) is - * considered a match with associativity domains of node 0. 
- */ - if (!using_legacy_numa && i != 0) { - for (j = 1; j < MAX_DISTANCE_REF_POINTS; j++) { - spapr->numa_assoc_array[i][j] = cpu_to_be32(i); - } - } + spapr->FORM1_assoc_array[i][0] = cpu_to_be32(FORM1_DIST_REF_POINTS); + spapr->FORM1_assoc_array[i][FORM1_DIST_REF_POINTS] = cpu_to_be32(i); } /* @@ -199,47 +305,95 @@ void spapr_numa_associativity_init(SpaprMachineState *spapr, max_nodes_with_gpus = nb_numa_nodes + NVGPU_MAX_NUM; for (i = nb_numa_nodes; i < max_nodes_with_gpus; i++) { - spapr->numa_assoc_array[i][0] = cpu_to_be32(MAX_DISTANCE_REF_POINTS); + spapr->FORM1_assoc_array[i][0] = cpu_to_be32(FORM1_DIST_REF_POINTS); - for (j = 1; j < MAX_DISTANCE_REF_POINTS; j++) { + for (j = 1; j < FORM1_DIST_REF_POINTS; j++) { uint32_t gpu_assoc = smc->pre_5_1_assoc_refpoints ? SPAPR_GPU_NUMA_ID : cpu_to_be32(i); - spapr->numa_assoc_array[i][j] = gpu_assoc; + spapr->FORM1_assoc_array[i][j] = gpu_assoc; } - spapr->numa_assoc_array[i][MAX_DISTANCE_REF_POINTS] = cpu_to_be32(i); + spapr->FORM1_assoc_array[i][FORM1_DIST_REF_POINTS] = cpu_to_be32(i); } /* - * Legacy NUMA guests (pseries-5.1 and older, or guests with only - * 1 NUMA node) will not benefit from anything we're going to do - * after this point. + * Guests pseries-5.1 and older uses zeroed associativity domains, + * i.e. no domain definition based on NUMA distance input. + * + * Same thing with guests that have only one NUMA node. */ - if (using_legacy_numa) { + if (smc->pre_5_2_numa_associativity || + machine->numa_state->num_nodes <= 1) { return; } - if (!spapr_numa_is_symmetrical(machine)) { - error_report("Asymmetrical NUMA topologies aren't supported " - "in the pSeries machine"); - exit(EXIT_FAILURE); + spapr_numa_define_FORM1_domains(spapr); +} + +/* + * Init NUMA FORM2 machine state data + */ +static void spapr_numa_FORM2_affinity_init(SpaprMachineState *spapr) +{ + int i; + + /* + * For all resources but CPUs, FORM2 associativity arrays will + * be a size 2 array with the following format: + * + * ibm,associativity = {1, numa_id} + * + * CPUs will write an additional 'vcpu_id' on top of the arrays + * being initialized here. 'numa_id' is represented by the + * index 'i' of the loop. + * + * Given that this initialization is also valid for GPU associativity + * arrays, handle everything in one single step by populating the + * arrays up to NUMA_NODES_MAX_NUM. + */ + for (i = 0; i < NUMA_NODES_MAX_NUM; i++) { + spapr->FORM2_assoc_array[i][0] = cpu_to_be32(1); + spapr->FORM2_assoc_array[i][1] = cpu_to_be32(i); + } +} + +void spapr_numa_associativity_init(SpaprMachineState *spapr, + MachineState *machine) +{ + spapr_numa_FORM1_affinity_init(spapr, machine); + spapr_numa_FORM2_affinity_init(spapr); +} + +void spapr_numa_associativity_check(SpaprMachineState *spapr) +{ + /* + * FORM2 does not have any restrictions we need to handle + * at CAS time, for now. 
+ */ + if (spapr_ovec_test(spapr->ov5_cas, OV5_FORM2_AFFINITY)) { + return; } - spapr_numa_define_associativity_domains(spapr); + spapr_numa_FORM1_affinity_check(MACHINE(spapr)); } void spapr_numa_write_associativity_dt(SpaprMachineState *spapr, void *fdt, int offset, int nodeid) { + const uint32_t *associativity = get_associativity(spapr, nodeid); + _FDT((fdt_setprop(fdt, offset, "ibm,associativity", - spapr->numa_assoc_array[nodeid], - sizeof(spapr->numa_assoc_array[nodeid])))); + associativity, + get_numa_assoc_size(spapr) * sizeof(uint32_t)))); } static uint32_t *spapr_numa_get_vcpu_assoc(SpaprMachineState *spapr, PowerPCCPU *cpu) { - uint32_t *vcpu_assoc = g_new(uint32_t, VCPU_ASSOC_SIZE); + const uint32_t *associativity = get_associativity(spapr, cpu->node_id); + int max_distance_ref_points = get_max_dist_ref_points(spapr); + int vcpu_assoc_size = get_vcpu_assoc_size(spapr); + uint32_t *vcpu_assoc = g_new(uint32_t, vcpu_assoc_size); int index = spapr_get_vcpu_id(cpu); /* @@ -248,10 +402,10 @@ static uint32_t *spapr_numa_get_vcpu_assoc(SpaprMachineState *spapr, * 0, put cpu_id last, then copy the remaining associativity * domains. */ - vcpu_assoc[0] = cpu_to_be32(MAX_DISTANCE_REF_POINTS + 1); - vcpu_assoc[VCPU_ASSOC_SIZE - 1] = cpu_to_be32(index); - memcpy(vcpu_assoc + 1, spapr->numa_assoc_array[cpu->node_id] + 1, - (VCPU_ASSOC_SIZE - 2) * sizeof(uint32_t)); + vcpu_assoc[0] = cpu_to_be32(max_distance_ref_points + 1); + vcpu_assoc[vcpu_assoc_size - 1] = cpu_to_be32(index); + memcpy(vcpu_assoc + 1, associativity + 1, + (vcpu_assoc_size - 2) * sizeof(uint32_t)); return vcpu_assoc; } @@ -260,12 +414,13 @@ int spapr_numa_fixup_cpu_dt(SpaprMachineState *spapr, void *fdt, int offset, PowerPCCPU *cpu) { g_autofree uint32_t *vcpu_assoc = NULL; + int vcpu_assoc_size = get_vcpu_assoc_size(spapr); vcpu_assoc = spapr_numa_get_vcpu_assoc(spapr, cpu); /* Advertise NUMA via ibm,associativity */ return fdt_setprop(fdt, offset, "ibm,associativity", vcpu_assoc, - VCPU_ASSOC_SIZE * sizeof(uint32_t)); + vcpu_assoc_size * sizeof(uint32_t)); } @@ -273,27 +428,28 @@ int spapr_numa_write_assoc_lookup_arrays(SpaprMachineState *spapr, void *fdt, int offset) { MachineState *machine = MACHINE(spapr); + int max_distance_ref_points = get_max_dist_ref_points(spapr); int nb_numa_nodes = machine->numa_state->num_nodes; int nr_nodes = nb_numa_nodes ? nb_numa_nodes : 1; uint32_t *int_buf, *cur_index, buf_len; int ret, i; /* ibm,associativity-lookup-arrays */ - buf_len = (nr_nodes * MAX_DISTANCE_REF_POINTS + 2) * sizeof(uint32_t); + buf_len = (nr_nodes * max_distance_ref_points + 2) * sizeof(uint32_t); cur_index = int_buf = g_malloc0(buf_len); int_buf[0] = cpu_to_be32(nr_nodes); /* Number of entries per associativity list */ - int_buf[1] = cpu_to_be32(MAX_DISTANCE_REF_POINTS); + int_buf[1] = cpu_to_be32(max_distance_ref_points); cur_index += 2; for (i = 0; i < nr_nodes; i++) { /* - * For the lookup-array we use the ibm,associativity array, - * from numa_assoc_array. without the first element (size). + * For the lookup-array we use the ibm,associativity array of the + * current NUMA affinity, without the first element (size). 
*/ - uint32_t *associativity = spapr->numa_assoc_array[i]; + const uint32_t *associativity = get_associativity(spapr, i); memcpy(cur_index, ++associativity, - sizeof(uint32_t) * MAX_DISTANCE_REF_POINTS); - cur_index += MAX_DISTANCE_REF_POINTS; + sizeof(uint32_t) * max_distance_ref_points); + cur_index += max_distance_ref_points; } ret = fdt_setprop(fdt, offset, "ibm,associativity-lookup-arrays", int_buf, (cur_index - int_buf) * sizeof(uint32_t)); @@ -302,12 +458,8 @@ int spapr_numa_write_assoc_lookup_arrays(SpaprMachineState *spapr, void *fdt, return ret; } -/* - * Helper that writes ibm,associativity-reference-points and - * max-associativity-domains in the RTAS pointed by @rtas - * in the DT @fdt. - */ -void spapr_numa_write_rtas_dt(SpaprMachineState *spapr, void *fdt, int rtas) +static void spapr_numa_FORM1_write_rtas_dt(SpaprMachineState *spapr, + void *fdt, int rtas) { MachineState *ms = MACHINE(spapr); SpaprMachineClass *smc = SPAPR_MACHINE_GET_CLASS(spapr); @@ -329,7 +481,8 @@ void spapr_numa_write_rtas_dt(SpaprMachineState *spapr, void *fdt, int rtas) cpu_to_be32(maxdomain) }; - if (spapr_machine_using_legacy_numa(spapr)) { + if (smc->pre_5_2_numa_associativity || + ms->numa_state->num_nodes <= 1) { uint32_t legacy_refpoints[] = { cpu_to_be32(0x4), cpu_to_be32(0x4), @@ -365,6 +518,113 @@ void spapr_numa_write_rtas_dt(SpaprMachineState *spapr, void *fdt, int rtas) maxdomains, sizeof(maxdomains))); } +static void spapr_numa_FORM2_write_rtas_tables(SpaprMachineState *spapr, + void *fdt, int rtas) +{ + MachineState *ms = MACHINE(spapr); + int nb_numa_nodes = ms->numa_state->num_nodes; + int distance_table_entries = nb_numa_nodes * nb_numa_nodes; + g_autofree uint32_t *lookup_index_table = NULL; + g_autofree uint8_t *distance_table = NULL; + int src, dst, i, distance_table_size; + + /* + * ibm,numa-lookup-index-table: array with length and a + * list of NUMA ids present in the guest. + */ + lookup_index_table = g_new0(uint32_t, nb_numa_nodes + 1); + lookup_index_table[0] = cpu_to_be32(nb_numa_nodes); + + for (i = 0; i < nb_numa_nodes; i++) { + lookup_index_table[i + 1] = cpu_to_be32(i); + } + + _FDT(fdt_setprop(fdt, rtas, "ibm,numa-lookup-index-table", + lookup_index_table, + (nb_numa_nodes + 1) * sizeof(uint32_t))); + + /* + * ibm,numa-distance-table: contains all node distances. First + * element is the size of the table as uint32, followed up + * by all the uint8 distances from the first NUMA node, then all + * distances from the second NUMA node and so on. + * + * ibm,numa-lookup-index-table is used by guest to navigate this + * array because NUMA ids can be sparse (node 0 is the first, + * node 8 is the second ...). + */ + distance_table_size = distance_table_entries * sizeof(uint8_t) + + sizeof(uint32_t); + distance_table = g_new0(uint8_t, distance_table_size); + stl_be_p(distance_table, distance_table_entries); + + /* Skip the uint32_t array length at the start */ + i = sizeof(uint32_t); + + for (src = 0; src < nb_numa_nodes; src++) { + for (dst = 0; dst < nb_numa_nodes; dst++) { + distance_table[i++] = get_numa_distance(ms, src, dst); + } + } + + _FDT(fdt_setprop(fdt, rtas, "ibm,numa-distance-table", + distance_table, distance_table_size)); +} + +/* + * This helper could be compressed in a single function with + * FORM1 logic since we're setting the same DT values, with the + * difference being a call to spapr_numa_FORM2_write_rtas_tables() + * in the end. 
The separation was made to avoid clogging FORM1 code + * which already has to deal with compat modes from previous + * QEMU machine types. + */ +static void spapr_numa_FORM2_write_rtas_dt(SpaprMachineState *spapr, + void *fdt, int rtas) +{ + MachineState *ms = MACHINE(spapr); + uint32_t number_nvgpus_nodes = spapr->gpu_numa_id - + spapr_numa_initial_nvgpu_numa_id(ms); + + /* + * In FORM2, ibm,associativity-reference-points will point to + * the element in the ibm,associativity array that contains the + * primary domain index (for FORM2, the first element). + * + * This value (in our case, the numa-id) is then used as an index + * to retrieve all other attributes of the node (distance, + * bandwidth, latency) via ibm,numa-lookup-index-table and other + * ibm,numa-*-table properties. + */ + uint32_t refpoints[] = { cpu_to_be32(1) }; + + uint32_t maxdomain = ms->numa_state->num_nodes + number_nvgpus_nodes; + uint32_t maxdomains[] = { cpu_to_be32(1), cpu_to_be32(maxdomain) }; + + _FDT(fdt_setprop(fdt, rtas, "ibm,associativity-reference-points", + refpoints, sizeof(refpoints))); + + _FDT(fdt_setprop(fdt, rtas, "ibm,max-associativity-domains", + maxdomains, sizeof(maxdomains))); + + spapr_numa_FORM2_write_rtas_tables(spapr, fdt, rtas); +} + +/* + * Helper that writes ibm,associativity-reference-points and + * max-associativity-domains in the RTAS pointed by @rtas + * in the DT @fdt. + */ +void spapr_numa_write_rtas_dt(SpaprMachineState *spapr, void *fdt, int rtas) +{ + if (spapr_ovec_test(spapr->ov5_cas, OV5_FORM2_AFFINITY)) { + spapr_numa_FORM2_write_rtas_dt(spapr, fdt, rtas); + return; + } + + spapr_numa_FORM1_write_rtas_dt(spapr, fdt, rtas); +} + static target_ulong h_home_node_associativity(PowerPCCPU *cpu, SpaprMachineState *spapr, target_ulong opcode, @@ -375,6 +635,7 @@ static target_ulong h_home_node_associativity(PowerPCCPU *cpu, target_ulong procno = args[1]; PowerPCCPU *tcpu; int idx, assoc_idx; + int vcpu_assoc_size = get_vcpu_assoc_size(spapr); /* only support procno from H_REGISTER_VPA */ if (flags != 0x1) { @@ -393,7 +654,7 @@ static target_ulong h_home_node_associativity(PowerPCCPU *cpu, * 12 associativity domains for vcpus. Assert and bail if that's * not the case. */ - G_STATIC_ASSERT((VCPU_ASSOC_SIZE - 1) <= 12); + g_assert((vcpu_assoc_size - 1) <= 12); vcpu_assoc = spapr_numa_get_vcpu_assoc(spapr, tcpu); /* assoc_idx starts at 1 to skip associativity size */ @@ -414,9 +675,9 @@ static target_ulong h_home_node_associativity(PowerPCCPU *cpu, * macro. The ternary will fill the remaining registers with -1 * after we went through vcpu_assoc[]. */ - a = assoc_idx < VCPU_ASSOC_SIZE ? + a = assoc_idx < vcpu_assoc_size ? be32_to_cpu(vcpu_assoc[assoc_idx++]) : -1; - b = assoc_idx < VCPU_ASSOC_SIZE ? + b = assoc_idx < vcpu_assoc_size ? 
be32_to_cpu(vcpu_assoc[assoc_idx++]) : -1; args[idx] = ASSOCIATIVITY(a, b); diff --git a/hw/ppc/spapr_pci.c b/hw/ppc/spapr_pci.c index 7430bd6314..5bfd4aa9e5 100644 --- a/hw/ppc/spapr_pci.c +++ b/hw/ppc/spapr_pci.c @@ -1317,8 +1317,7 @@ static int spapr_dt_pci_bus(SpaprPhbState *sphb, PCIBus *bus, RESOURCE_CELLS_SIZE)); assert(bus); - pci_for_each_device_reverse(bus, pci_bus_num(bus), - spapr_dt_pci_device_cb, &cbinfo); + pci_for_each_device_under_bus_reverse(bus, spapr_dt_pci_device_cb, &cbinfo); if (cbinfo.err) { return cbinfo.err; } @@ -2306,8 +2305,8 @@ static void spapr_phb_pci_enumerate_bridge(PCIBus *bus, PCIDevice *pdev, return; } - pci_for_each_device(sec_bus, pci_bus_num(sec_bus), - spapr_phb_pci_enumerate_bridge, bus_no); + pci_for_each_device_under_bus(sec_bus, spapr_phb_pci_enumerate_bridge, + bus_no); pci_default_write_config(pdev, PCI_SUBORDINATE_BUS, *bus_no, 1); } @@ -2316,9 +2315,8 @@ static void spapr_phb_pci_enumerate(SpaprPhbState *phb) PCIBus *bus = PCI_HOST_BRIDGE(phb)->bus; unsigned int bus_no = 0; - pci_for_each_device(bus, pci_bus_num(bus), - spapr_phb_pci_enumerate_bridge, - &bus_no); + pci_for_each_device_under_bus(bus, spapr_phb_pci_enumerate_bridge, + &bus_no); } diff --git a/hw/ppc/spapr_pci_nvlink2.c b/hw/ppc/spapr_pci_nvlink2.c index 8ef9b40a18..7fb0cf4d04 100644 --- a/hw/ppc/spapr_pci_nvlink2.c +++ b/hw/ppc/spapr_pci_nvlink2.c @@ -164,8 +164,7 @@ static void spapr_phb_pci_collect_nvgpu(PCIBus *bus, PCIDevice *pdev, return; } - pci_for_each_device(sec_bus, pci_bus_num(sec_bus), - spapr_phb_pci_collect_nvgpu, opaque); + pci_for_each_device_under_bus(sec_bus, spapr_phb_pci_collect_nvgpu, opaque); } void spapr_phb_nvgpu_setup(SpaprPhbState *sphb, Error **errp) @@ -183,8 +182,8 @@ void spapr_phb_nvgpu_setup(SpaprPhbState *sphb, Error **errp) sphb->nvgpus->nv2_atsd_current = sphb->nv2_atsd_win_addr; bus = PCI_HOST_BRIDGE(sphb)->bus; - pci_for_each_device(bus, pci_bus_num(bus), - spapr_phb_pci_collect_nvgpu, sphb->nvgpus); + pci_for_each_device_under_bus(bus, spapr_phb_pci_collect_nvgpu, + sphb->nvgpus); if (sphb->nvgpus->err) { error_propagate(errp, sphb->nvgpus->err); diff --git a/hw/ppc/spapr_pci_vfio.c b/hw/ppc/spapr_pci_vfio.c index f3b37df8ea..2a76b4e0b5 100644 --- a/hw/ppc/spapr_pci_vfio.c +++ b/hw/ppc/spapr_pci_vfio.c @@ -164,8 +164,8 @@ static void spapr_phb_vfio_eeh_clear_dev_msix(PCIBus *bus, static void spapr_phb_vfio_eeh_clear_bus_msix(PCIBus *bus, void *opaque) { - pci_for_each_device(bus, pci_bus_num(bus), - spapr_phb_vfio_eeh_clear_dev_msix, NULL); + pci_for_each_device_under_bus(bus, spapr_phb_vfio_eeh_clear_dev_msix, + NULL); } static void spapr_phb_vfio_eeh_pre_reset(SpaprPhbState *sphb) diff --git a/hw/ppc/spapr_softmmu.c b/hw/ppc/spapr_softmmu.c index 6c6b86dd3c..f8924270ef 100644 --- a/hw/ppc/spapr_softmmu.c +++ b/hw/ppc/spapr_softmmu.c @@ -1,25 +1,10 @@ #include "qemu/osdep.h" #include "qemu/cutils.h" -#include "qapi/error.h" -#include "sysemu/hw_accel.h" -#include "sysemu/runstate.h" -#include "qemu/log.h" -#include "qemu/main-loop.h" -#include "qemu/module.h" -#include "qemu/error-report.h" #include "cpu.h" -#include "exec/exec-all.h" #include "helper_regs.h" #include "hw/ppc/spapr.h" -#include "hw/ppc/spapr_cpu_core.h" #include "mmu-hash64.h" -#include "cpu-models.h" -#include "trace.h" -#include "kvm_ppc.h" -#include "hw/ppc/fdt.h" -#include "hw/ppc/spapr_ovec.h" #include "mmu-book3s-v3.h" -#include "hw/mem/memory-device.h" static inline bool valid_ptex(PowerPCCPU *cpu, target_ulong ptex) { diff --git a/hw/ppc/spapr_vio.c 
b/hw/ppc/spapr_vio.c index b59452bcd6..b975ed29ca 100644 --- a/hw/ppc/spapr_vio.c +++ b/hw/ppc/spapr_vio.c @@ -577,7 +577,7 @@ SpaprVioBus *spapr_vio_bus_init(void) sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); /* Create bus on bridge device */ - qbus = qbus_create(TYPE_SPAPR_VIO_BUS, dev, "spapr-vio"); + qbus = qbus_new(TYPE_SPAPR_VIO_BUS, dev, "spapr-vio"); bus = SPAPR_VIO_BUS(qbus); bus->next_reg = SPAPR_VIO_REG_BASE; diff --git a/hw/ppc/trace-events b/hw/ppc/trace-events index da6e74b80d..3bf43fa340 100644 --- a/hw/ppc/trace-events +++ b/hw/ppc/trace-events @@ -97,7 +97,27 @@ vof_claimed(uint64_t start, uint64_t end, uint64_t size) "0x%"PRIx64"..0x%"PRIx6 # ppc.c ppc_tb_adjust(uint64_t offs1, uint64_t offs2, int64_t diff, int64_t seconds) "adjusted from 0x%"PRIx64" to 0x%"PRIx64", diff %"PRId64" (%"PRId64"s)" +ppc_tb_load(uint64_t tb) "tb 0x%016" PRIx64 +ppc_tb_store(uint64_t tb, uint64_t offset) "tb 0x%016" PRIx64 " offset 0x%08" PRIx64 +ppc_decr_load(uint64_t tb) "decr 0x%016" PRIx64 +ppc_decr_excp(const char *action) "%s decrementer" +ppc_decr_store(uint32_t nr_bits, uint64_t decr, uint64_t value) "%d-bit 0x%016" PRIx64 " => 0x%016" PRIx64 + +ppc4xx_fit(uint32_t ir, uint64_t tcr, uint64_t tsr) "ir %d TCR 0x%" PRIx64 " TSR 0x%" PRIx64 +ppc4xx_pit_stop(void) "" +ppc4xx_pit_start(uint64_t reload) "PIT 0x%016" PRIx64 +ppc4xx_pit(uint32_t ar, uint32_t ir, uint64_t tcr, uint64_t tsr, uint64_t reload) "ar %d ir %d TCR 0x%" PRIx64 " TSR 0x%" PRIx64 " PIT 0x%016" PRIx64 +ppc4xx_wdt(uint64_t tcr, uint64_t tsr) "TCR 0x%" PRIx64 " TSR 0x%" PRIx64 +ppc40x_store_pit(uint64_t value) "val 0x%" PRIx64 +ppc40x_set_tb_clk(uint32_t value) "new frequency %" PRIu32 +ppc40x_timers_init(uint32_t value) "frequency %" PRIu32 + +ppc_irq_set(void *env, uint32_t pin, uint32_t level) "env [%p] pin %d level %d" +ppc_irq_set_exit(void *env, uint32_t n_IRQ, uint32_t level, uint32_t pending, uint32_t request) "env [%p] n_IRQ %d level %d => pending 0x%08" PRIx32 " req 0x%08" PRIx32 +ppc_irq_set_state(const char *name, uint32_t level) "\"%s\" level %d" +ppc_irq_reset(const char *name) "%s" +ppc_irq_cpu(const char *action) "%s" # prep_systemio.c prep_systemio_read(uint32_t addr, uint32_t val) "read addr=0x%x val=0x%x" diff --git a/hw/rdma/rdma_rm.c b/hw/rdma/rdma_rm.c index 49141d4074..cfd85de3e6 100644 --- a/hw/rdma/rdma_rm.c +++ b/hw/rdma/rdma_rm.c @@ -27,58 +27,58 @@ #define PG_DIR_SZ { TARGET_PAGE_SIZE / sizeof(__u64) } #define PG_TBL_SZ { TARGET_PAGE_SIZE / sizeof(__u64) } -void rdma_dump_device_counters(Monitor *mon, RdmaDeviceResources *dev_res) +void rdma_format_device_counters(RdmaDeviceResources *dev_res, GString *buf) { - monitor_printf(mon, "\ttx : %" PRId64 "\n", - dev_res->stats.tx); - monitor_printf(mon, "\ttx_len : %" PRId64 "\n", - dev_res->stats.tx_len); - monitor_printf(mon, "\ttx_err : %" PRId64 "\n", - dev_res->stats.tx_err); - monitor_printf(mon, "\trx_bufs : %" PRId64 "\n", - dev_res->stats.rx_bufs); - monitor_printf(mon, "\trx_srq : %" PRId64 "\n", - dev_res->stats.rx_srq); - monitor_printf(mon, "\trx_bufs_len : %" PRId64 "\n", - dev_res->stats.rx_bufs_len); - monitor_printf(mon, "\trx_bufs_err : %" PRId64 "\n", - dev_res->stats.rx_bufs_err); - monitor_printf(mon, "\tcomps : %" PRId64 "\n", - dev_res->stats.completions); - monitor_printf(mon, "\tmissing_comps : %" PRId32 "\n", - dev_res->stats.missing_cqe); - monitor_printf(mon, "\tpoll_cq (bk) : %" PRId64 "\n", - dev_res->stats.poll_cq_from_bk); - monitor_printf(mon, "\tpoll_cq_ppoll_to : %" PRId64 "\n", - 
dev_res->stats.poll_cq_ppoll_to); - monitor_printf(mon, "\tpoll_cq (fe) : %" PRId64 "\n", - dev_res->stats.poll_cq_from_guest); - monitor_printf(mon, "\tpoll_cq_empty : %" PRId64 "\n", - dev_res->stats.poll_cq_from_guest_empty); - monitor_printf(mon, "\tmad_tx : %" PRId64 "\n", - dev_res->stats.mad_tx); - monitor_printf(mon, "\tmad_tx_err : %" PRId64 "\n", - dev_res->stats.mad_tx_err); - monitor_printf(mon, "\tmad_rx : %" PRId64 "\n", - dev_res->stats.mad_rx); - monitor_printf(mon, "\tmad_rx_err : %" PRId64 "\n", - dev_res->stats.mad_rx_err); - monitor_printf(mon, "\tmad_rx_bufs : %" PRId64 "\n", - dev_res->stats.mad_rx_bufs); - monitor_printf(mon, "\tmad_rx_bufs_err : %" PRId64 "\n", - dev_res->stats.mad_rx_bufs_err); - monitor_printf(mon, "\tPDs : %" PRId32 "\n", - dev_res->pd_tbl.used); - monitor_printf(mon, "\tMRs : %" PRId32 "\n", - dev_res->mr_tbl.used); - monitor_printf(mon, "\tUCs : %" PRId32 "\n", - dev_res->uc_tbl.used); - monitor_printf(mon, "\tQPs : %" PRId32 "\n", - dev_res->qp_tbl.used); - monitor_printf(mon, "\tCQs : %" PRId32 "\n", - dev_res->cq_tbl.used); - monitor_printf(mon, "\tCEQ_CTXs : %" PRId32 "\n", - dev_res->cqe_ctx_tbl.used); + g_string_append_printf(buf, "\ttx : %" PRId64 "\n", + dev_res->stats.tx); + g_string_append_printf(buf, "\ttx_len : %" PRId64 "\n", + dev_res->stats.tx_len); + g_string_append_printf(buf, "\ttx_err : %" PRId64 "\n", + dev_res->stats.tx_err); + g_string_append_printf(buf, "\trx_bufs : %" PRId64 "\n", + dev_res->stats.rx_bufs); + g_string_append_printf(buf, "\trx_srq : %" PRId64 "\n", + dev_res->stats.rx_srq); + g_string_append_printf(buf, "\trx_bufs_len : %" PRId64 "\n", + dev_res->stats.rx_bufs_len); + g_string_append_printf(buf, "\trx_bufs_err : %" PRId64 "\n", + dev_res->stats.rx_bufs_err); + g_string_append_printf(buf, "\tcomps : %" PRId64 "\n", + dev_res->stats.completions); + g_string_append_printf(buf, "\tmissing_comps : %" PRId32 "\n", + dev_res->stats.missing_cqe); + g_string_append_printf(buf, "\tpoll_cq (bk) : %" PRId64 "\n", + dev_res->stats.poll_cq_from_bk); + g_string_append_printf(buf, "\tpoll_cq_ppoll_to : %" PRId64 "\n", + dev_res->stats.poll_cq_ppoll_to); + g_string_append_printf(buf, "\tpoll_cq (fe) : %" PRId64 "\n", + dev_res->stats.poll_cq_from_guest); + g_string_append_printf(buf, "\tpoll_cq_empty : %" PRId64 "\n", + dev_res->stats.poll_cq_from_guest_empty); + g_string_append_printf(buf, "\tmad_tx : %" PRId64 "\n", + dev_res->stats.mad_tx); + g_string_append_printf(buf, "\tmad_tx_err : %" PRId64 "\n", + dev_res->stats.mad_tx_err); + g_string_append_printf(buf, "\tmad_rx : %" PRId64 "\n", + dev_res->stats.mad_rx); + g_string_append_printf(buf, "\tmad_rx_err : %" PRId64 "\n", + dev_res->stats.mad_rx_err); + g_string_append_printf(buf, "\tmad_rx_bufs : %" PRId64 "\n", + dev_res->stats.mad_rx_bufs); + g_string_append_printf(buf, "\tmad_rx_bufs_err : %" PRId64 "\n", + dev_res->stats.mad_rx_bufs_err); + g_string_append_printf(buf, "\tPDs : %" PRId32 "\n", + dev_res->pd_tbl.used); + g_string_append_printf(buf, "\tMRs : %" PRId32 "\n", + dev_res->mr_tbl.used); + g_string_append_printf(buf, "\tUCs : %" PRId32 "\n", + dev_res->uc_tbl.used); + g_string_append_printf(buf, "\tQPs : %" PRId32 "\n", + dev_res->qp_tbl.used); + g_string_append_printf(buf, "\tCQs : %" PRId32 "\n", + dev_res->cq_tbl.used); + g_string_append_printf(buf, "\tCEQ_CTXs : %" PRId32 "\n", + dev_res->cqe_ctx_tbl.used); } static inline void res_tbl_init(const char *name, RdmaRmResTbl *tbl, diff --git a/hw/rdma/rdma_rm.h b/hw/rdma/rdma_rm.h index 
e8639909cd..d69a917795 100644 --- a/hw/rdma/rdma_rm.h +++ b/hw/rdma/rdma_rm.h @@ -92,6 +92,6 @@ static inline union ibv_gid *rdma_rm_get_gid(RdmaDeviceResources *dev_res, { return &dev_res->port.gid_tbl[sgid_idx].gid; } -void rdma_dump_device_counters(Monitor *mon, RdmaDeviceResources *dev_res); +void rdma_format_device_counters(RdmaDeviceResources *dev_res, GString *buf); #endif diff --git a/hw/rdma/vmw/pvrdma_main.c b/hw/rdma/vmw/pvrdma_main.c index 7c0c3551a8..91206dbb8e 100644 --- a/hw/rdma/vmw/pvrdma_main.c +++ b/hw/rdma/vmw/pvrdma_main.c @@ -58,24 +58,25 @@ static Property pvrdma_dev_properties[] = { DEFINE_PROP_END_OF_LIST(), }; -static void pvrdma_print_statistics(Monitor *mon, RdmaProvider *obj) +static void pvrdma_format_statistics(RdmaProvider *obj, GString *buf) { PVRDMADev *dev = PVRDMA_DEV(obj); PCIDevice *pdev = PCI_DEVICE(dev); - monitor_printf(mon, "%s, %x.%x\n", pdev->name, PCI_SLOT(pdev->devfn), - PCI_FUNC(pdev->devfn)); - monitor_printf(mon, "\tcommands : %" PRId64 "\n", - dev->stats.commands); - monitor_printf(mon, "\tregs_reads : %" PRId64 "\n", - dev->stats.regs_reads); - monitor_printf(mon, "\tregs_writes : %" PRId64 "\n", - dev->stats.regs_writes); - monitor_printf(mon, "\tuar_writes : %" PRId64 "\n", - dev->stats.uar_writes); - monitor_printf(mon, "\tinterrupts : %" PRId64 "\n", - dev->stats.interrupts); - rdma_dump_device_counters(mon, &dev->rdma_dev_res); + g_string_append_printf(buf, "%s, %x.%x\n", + pdev->name, PCI_SLOT(pdev->devfn), + PCI_FUNC(pdev->devfn)); + g_string_append_printf(buf, "\tcommands : %" PRId64 "\n", + dev->stats.commands); + g_string_append_printf(buf, "\tregs_reads : %" PRId64 "\n", + dev->stats.regs_reads); + g_string_append_printf(buf, "\tregs_writes : %" PRId64 "\n", + dev->stats.regs_writes); + g_string_append_printf(buf, "\tuar_writes : %" PRId64 "\n", + dev->stats.uar_writes); + g_string_append_printf(buf, "\tinterrupts : %" PRId64 "\n", + dev->stats.interrupts); + rdma_format_device_counters(&dev->rdma_dev_res, buf); } static void free_dev_ring(PCIDevice *pci_dev, PvrdmaRing *ring, @@ -699,7 +700,7 @@ static void pvrdma_class_init(ObjectClass *klass, void *data) device_class_set_props(dc, pvrdma_dev_properties); set_bit(DEVICE_CATEGORY_NETWORK, dc->categories); - ir->print_statistics = pvrdma_print_statistics; + ir->format_statistics = pvrdma_format_statistics; } static const TypeInfo pvrdma_info = { diff --git a/hw/remote/proxy-memory-listener.c b/hw/remote/proxy-memory-listener.c index 901dbf1357..882c9b4854 100644 --- a/hw/remote/proxy-memory-listener.c +++ b/hw/remote/proxy-memory-listener.c @@ -219,6 +219,7 @@ void proxy_memory_listener_configure(ProxyMemoryListener *proxy_listener, proxy_listener->listener.region_add = proxy_memory_listener_region_addnop; proxy_listener->listener.region_nop = proxy_memory_listener_region_addnop; proxy_listener->listener.priority = 10; + proxy_listener->listener.name = "proxy"; memory_listener_register(&proxy_listener->listener, &address_space_memory); diff --git a/hw/remote/proxy.c b/hw/remote/proxy.c index 499f540c94..bad164299d 100644 --- a/hw/remote/proxy.c +++ b/hw/remote/proxy.c @@ -324,6 +324,7 @@ static void probe_pci_info(PCIDevice *dev, Error **errp) set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); break; case PCI_BASE_CLASS_NETWORK: + case PCI_BASE_CLASS_WIRELESS: set_bit(DEVICE_CATEGORY_NETWORK, dc->categories); break; case PCI_BASE_CLASS_INPUT: diff --git a/hw/riscv/boot.c b/hw/riscv/boot.c index 993bf89064..519fa455a1 100644 --- a/hw/riscv/boot.c +++ b/hw/riscv/boot.c @@ -35,7 
+35,32 @@ bool riscv_is_32bit(RISCVHartArrayState *harts) { - return riscv_cpu_is_32bit(&harts->harts[0].env); + return harts->harts[0].env.misa_mxl_max == MXL_RV32; +} + +/* + * Return the per-socket PLIC hart topology configuration string + * (caller must free with g_free()) + */ +char *riscv_plic_hart_config_string(int hart_count) +{ + g_autofree const char **vals = g_new(const char *, hart_count + 1); + int i; + + for (i = 0; i < hart_count; i++) { + CPUState *cs = qemu_get_cpu(i); + CPURISCVState *env = &RISCV_CPU(cs)->env; + + if (riscv_has_ext(env, RVS)) { + vals[i] = "MS"; + } else { + vals[i] = "M"; + } + } + vals[i] = NULL; + + /* g_strjoinv() obliges us to cast away const here */ + return g_strjoinv(",", (char **)vals); } target_ulong riscv_calc_kernel_start_addr(RISCVHartArrayState *harts, diff --git a/hw/riscv/microchip_pfsoc.c b/hw/riscv/microchip_pfsoc.c index e475b6d511..57d779fb55 100644 --- a/hw/riscv/microchip_pfsoc.c +++ b/hw/riscv/microchip_pfsoc.c @@ -187,7 +187,6 @@ static void microchip_pfsoc_soc_realize(DeviceState *dev, Error **errp) MemoryRegion *envm_data = g_new(MemoryRegion, 1); MemoryRegion *qspi_xip_mem = g_new(MemoryRegion, 1); char *plic_hart_config; - size_t plic_hart_config_len; NICInfo *nd; int i; @@ -262,18 +261,7 @@ static void microchip_pfsoc_soc_realize(DeviceState *dev, Error **errp) l2lim_mem); /* create PLIC hart topology configuration string */ - plic_hart_config_len = (strlen(MICROCHIP_PFSOC_PLIC_HART_CONFIG) + 1) * - ms->smp.cpus; - plic_hart_config = g_malloc0(plic_hart_config_len); - for (i = 0; i < ms->smp.cpus; i++) { - if (i != 0) { - strncat(plic_hart_config, "," MICROCHIP_PFSOC_PLIC_HART_CONFIG, - plic_hart_config_len); - } else { - strncat(plic_hart_config, "M", plic_hart_config_len); - } - plic_hart_config_len -= (strlen(MICROCHIP_PFSOC_PLIC_HART_CONFIG) + 1); - } + plic_hart_config = riscv_plic_hart_config_string(ms->smp.cpus); /* PLIC */ s->plic = sifive_plic_create(memmap[MICROCHIP_PFSOC_PLIC].base, @@ -463,7 +451,7 @@ static void microchip_icicle_kit_machine_init(MachineState *machine) MemoryRegion *mem_low_alias = g_new(MemoryRegion, 1); MemoryRegion *mem_high = g_new(MemoryRegion, 1); MemoryRegion *mem_high_alias = g_new(MemoryRegion, 1); - uint64_t mem_high_size; + uint64_t mem_low_size, mem_high_size; hwaddr firmware_load_addr; const char *firmware_name; bool kernel_as_payload = false; @@ -485,31 +473,34 @@ static void microchip_icicle_kit_machine_init(MachineState *machine) TYPE_MICROCHIP_PFSOC); qdev_realize(DEVICE(&s->soc), NULL, &error_abort); + /* Split RAM into low and high regions using aliases to machine->ram */ + mem_low_size = memmap[MICROCHIP_PFSOC_DRAM_LO].size; + mem_high_size = machine->ram_size - mem_low_size; + memory_region_init_alias(mem_low, NULL, + "microchip.icicle.kit.ram_low", machine->ram, + 0, mem_low_size); + memory_region_init_alias(mem_high, NULL, + "microchip.icicle.kit.ram_high", machine->ram, + mem_low_size, mem_high_size); + /* Register RAM */ - memory_region_init_ram(mem_low, NULL, "microchip.icicle.kit.ram_low", - memmap[MICROCHIP_PFSOC_DRAM_LO].size, - &error_fatal); - memory_region_init_alias(mem_low_alias, NULL, - "microchip.icicle.kit.ram_low.alias", - mem_low, 0, - memmap[MICROCHIP_PFSOC_DRAM_LO_ALIAS].size); memory_region_add_subregion(system_memory, memmap[MICROCHIP_PFSOC_DRAM_LO].base, mem_low); + memory_region_add_subregion(system_memory, + memmap[MICROCHIP_PFSOC_DRAM_HI].base, + mem_high); + + /* Create aliases for the low and high RAM regions */ + 
memory_region_init_alias(mem_low_alias, NULL, + "microchip.icicle.kit.ram_low.alias", + mem_low, 0, mem_low_size); memory_region_add_subregion(system_memory, memmap[MICROCHIP_PFSOC_DRAM_LO_ALIAS].base, mem_low_alias); - - mem_high_size = machine->ram_size - 1 * GiB; - - memory_region_init_ram(mem_high, NULL, "microchip.icicle.kit.ram_high", - mem_high_size, &error_fatal); memory_region_init_alias(mem_high_alias, NULL, "microchip.icicle.kit.ram_high.alias", mem_high, 0, mem_high_size); - memory_region_add_subregion(system_memory, - memmap[MICROCHIP_PFSOC_DRAM_HI].base, - mem_high); memory_region_add_subregion(system_memory, memmap[MICROCHIP_PFSOC_DRAM_HI_ALIAS].base, mem_high_alias); @@ -606,6 +597,7 @@ static void microchip_icicle_kit_machine_class_init(ObjectClass *oc, void *data) MICROCHIP_PFSOC_COMPUTE_CPU_COUNT; mc->min_cpus = MICROCHIP_PFSOC_MANAGEMENT_CPU_COUNT + 1; mc->default_cpus = mc->min_cpus; + mc->default_ram_id = "microchip.icicle.kit.ram"; /* * Map 513 MiB high memory, the mimimum required high memory size, because diff --git a/hw/riscv/opentitan.c b/hw/riscv/opentitan.c index 9803ae6d70..c531450b9f 100644 --- a/hw/riscv/opentitan.c +++ b/hw/riscv/opentitan.c @@ -19,6 +19,7 @@ */ #include "qemu/osdep.h" +#include "qemu/cutils.h" #include "hw/riscv/opentitan.h" #include "qapi/error.h" #include "hw/boards.h" @@ -46,38 +47,43 @@ static const MemMapEntry ibex_memmap[] = { [IBEX_DEV_PINMUX] = { 0x40460000, 0x1000 }, [IBEX_DEV_PADCTRL] = { 0x40470000, 0x1000 }, [IBEX_DEV_FLASH_CTRL] = { 0x41000000, 0x1000 }, - [IBEX_DEV_PLIC] = { 0x41010000, 0x1000 }, [IBEX_DEV_AES] = { 0x41100000, 0x1000 }, [IBEX_DEV_HMAC] = { 0x41110000, 0x1000 }, [IBEX_DEV_KMAC] = { 0x41120000, 0x1000 }, - [IBEX_DEV_KEYMGR] = { 0x41130000, 0x1000 }, + [IBEX_DEV_OTBN] = { 0x41130000, 0x10000 }, + [IBEX_DEV_KEYMGR] = { 0x41140000, 0x1000 }, [IBEX_DEV_CSRNG] = { 0x41150000, 0x1000 }, [IBEX_DEV_ENTROPY] = { 0x41160000, 0x1000 }, [IBEX_DEV_EDNO] = { 0x41170000, 0x1000 }, [IBEX_DEV_EDN1] = { 0x41180000, 0x1000 }, [IBEX_DEV_ALERT_HANDLER] = { 0x411b0000, 0x1000 }, [IBEX_DEV_NMI_GEN] = { 0x411c0000, 0x1000 }, - [IBEX_DEV_OTBN] = { 0x411d0000, 0x10000 }, [IBEX_DEV_PERI] = { 0x411f0000, 0x10000 }, + [IBEX_DEV_PLIC] = { 0x48000000, 0x4005000 }, [IBEX_DEV_FLASH_VIRTUAL] = { 0x80000000, 0x80000 }, }; static void opentitan_board_init(MachineState *machine) { + MachineClass *mc = MACHINE_GET_CLASS(machine); const MemMapEntry *memmap = ibex_memmap; OpenTitanState *s = g_new0(OpenTitanState, 1); MemoryRegion *sys_mem = get_system_memory(); - MemoryRegion *main_mem = g_new(MemoryRegion, 1); + + if (machine->ram_size != mc->default_ram_size) { + char *sz = size_to_str(mc->default_ram_size); + error_report("Invalid RAM size, should be %s", sz); + g_free(sz); + exit(EXIT_FAILURE); + } /* Initialize SoC */ object_initialize_child(OBJECT(machine), "soc", &s->soc, TYPE_RISCV_IBEX_SOC); qdev_realize(DEVICE(&s->soc), NULL, &error_abort); - memory_region_init_ram(main_mem, NULL, "riscv.lowrisc.ibex.ram", - memmap[IBEX_DEV_RAM].size, &error_fatal); memory_region_add_subregion(sys_mem, - memmap[IBEX_DEV_RAM].base, main_mem); + memmap[IBEX_DEV_RAM].base, machine->ram); if (machine->firmware) { riscv_load_firmware(machine->firmware, memmap[IBEX_DEV_RAM].base, NULL); @@ -95,6 +101,8 @@ static void opentitan_machine_init(MachineClass *mc) mc->init = opentitan_board_init; mc->max_cpus = 1; mc->default_cpu_type = TYPE_RISCV_CPU_IBEX; + mc->default_ram_id = "riscv.lowrisc.ibex.ram"; + mc->default_ram_size = ibex_memmap[IBEX_DEV_RAM].size; } 
DEFINE_MACHINE("opentitan", opentitan_machine_init) @@ -105,7 +113,7 @@ static void lowrisc_ibex_soc_init(Object *obj) object_initialize_child(obj, "cpus", &s->cpus, TYPE_RISCV_HART_ARRAY); - object_initialize_child(obj, "plic", &s->plic, TYPE_IBEX_PLIC); + object_initialize_child(obj, "plic", &s->plic, TYPE_SIFIVE_PLIC); object_initialize_child(obj, "uart", &s->uart, TYPE_IBEX_UART); @@ -145,6 +153,18 @@ static void lowrisc_ibex_soc_realize(DeviceState *dev_soc, Error **errp) &s->flash_alias); /* PLIC */ + qdev_prop_set_string(DEVICE(&s->plic), "hart-config", "M"); + qdev_prop_set_uint32(DEVICE(&s->plic), "hartid-base", 0); + qdev_prop_set_uint32(DEVICE(&s->plic), "num-sources", 180); + qdev_prop_set_uint32(DEVICE(&s->plic), "num-priorities", 3); + qdev_prop_set_uint32(DEVICE(&s->plic), "priority-base", 0x00); + qdev_prop_set_uint32(DEVICE(&s->plic), "pending-base", 0x1000); + qdev_prop_set_uint32(DEVICE(&s->plic), "enable-base", 0x2000); + qdev_prop_set_uint32(DEVICE(&s->plic), "enable-stride", 0x18); + qdev_prop_set_uint32(DEVICE(&s->plic), "context-base", 0x200000); + qdev_prop_set_uint32(DEVICE(&s->plic), "context-stride", 8); + qdev_prop_set_uint32(DEVICE(&s->plic), "aperture-size", memmap[IBEX_DEV_PLIC].size); + if (!sysbus_realize(SYS_BUS_DEVICE(&s->plic), errp)) { return; } @@ -153,7 +173,7 @@ static void lowrisc_ibex_soc_realize(DeviceState *dev_soc, Error **errp) for (i = 0; i < ms->smp.cpus; i++) { CPUState *cpu = qemu_get_cpu(i); - qdev_connect_gpio_out(DEVICE(&s->plic), i, + qdev_connect_gpio_out(DEVICE(&s->plic), ms->smp.cpus + i, qdev_get_gpio_in(DEVICE(cpu), IRQ_M_EXT)); } diff --git a/hw/riscv/shakti_c.c b/hw/riscv/shakti_c.c index 2f084d3c8d..90e2cf609f 100644 --- a/hw/riscv/shakti_c.c +++ b/hw/riscv/shakti_c.c @@ -45,7 +45,6 @@ static void shakti_c_machine_state_init(MachineState *mstate) { ShaktiCMachineState *sms = RISCV_SHAKTI_MACHINE(mstate); MemoryRegion *system_memory = get_system_memory(); - MemoryRegion *main_mem = g_new(MemoryRegion, 1); /* Allow only Shakti C CPU for this platform */ if (strcmp(mstate->cpu_type, TYPE_RISCV_CPU_SHAKTI_C) != 0) { @@ -59,11 +58,9 @@ static void shakti_c_machine_state_init(MachineState *mstate) qdev_realize(DEVICE(&sms->soc), NULL, &error_abort); /* register RAM */ - memory_region_init_ram(main_mem, NULL, "riscv.shakti.c.ram", - mstate->ram_size, &error_fatal); memory_region_add_subregion(system_memory, shakti_c_memmap[SHAKTI_C_RAM].base, - main_mem); + mstate->ram); /* ROM reset vector */ riscv_setup_rom_reset_vec(mstate, &sms->soc.cpus, @@ -88,6 +85,7 @@ static void shakti_c_machine_class_init(ObjectClass *klass, void *data) mc->desc = "RISC-V Board compatible with Shakti SDK"; mc->init = shakti_c_machine_state_init; mc->default_cpu_type = TYPE_RISCV_CPU_SHAKTI_C; + mc->default_ram_id = "riscv.shakti.c.ram"; } static const TypeInfo shakti_c_machine_type_info = { @@ -150,6 +148,13 @@ static void shakti_c_soc_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = shakti_c_soc_state_realize; + /* + * Reasons: + * - Creates CPUS in riscv_hart_realize(), and can create unintended + * CPUs + * - Uses serial_hds in realize function, thus can't be used twice + */ + dc->user_creatable = false; } static void shakti_c_soc_instance_init(Object *obj) diff --git a/hw/riscv/sifive_e.c b/hw/riscv/sifive_e.c index 6e95ea5896..9b206407a6 100644 --- a/hw/riscv/sifive_e.c +++ b/hw/riscv/sifive_e.c @@ -29,6 +29,7 @@ */ #include "qemu/osdep.h" +#include "qemu/cutils.h" #include "qemu/error-report.h" #include 
"qapi/error.h" #include "hw/boards.h" @@ -71,22 +72,27 @@ static const MemMapEntry sifive_e_memmap[] = { static void sifive_e_machine_init(MachineState *machine) { + MachineClass *mc = MACHINE_GET_CLASS(machine); const MemMapEntry *memmap = sifive_e_memmap; SiFiveEState *s = RISCV_E_MACHINE(machine); MemoryRegion *sys_mem = get_system_memory(); - MemoryRegion *main_mem = g_new(MemoryRegion, 1); int i; + if (machine->ram_size != mc->default_ram_size) { + char *sz = size_to_str(mc->default_ram_size); + error_report("Invalid RAM size, should be %s", sz); + g_free(sz); + exit(EXIT_FAILURE); + } + /* Initialize SoC */ object_initialize_child(OBJECT(machine), "soc", &s->soc, TYPE_RISCV_E_SOC); qdev_realize(DEVICE(&s->soc), NULL, &error_abort); /* Data Tightly Integrated Memory */ - memory_region_init_ram(main_mem, NULL, "riscv.sifive.e.ram", - memmap[SIFIVE_E_DEV_DTIM].size, &error_fatal); memory_region_add_subregion(sys_mem, - memmap[SIFIVE_E_DEV_DTIM].base, main_mem); + memmap[SIFIVE_E_DEV_DTIM].base, machine->ram); /* Mask ROM reset vector */ uint32_t reset_vec[4]; @@ -142,6 +148,8 @@ static void sifive_e_machine_class_init(ObjectClass *oc, void *data) mc->init = sifive_e_machine_init; mc->max_cpus = 1; mc->default_cpu_type = SIFIVE_E_CPU; + mc->default_ram_id = "riscv.sifive.e.ram"; + mc->default_ram_size = sifive_e_memmap[SIFIVE_E_DEV_DTIM].size; object_class_property_add_bool(oc, "revb", sifive_e_machine_get_revb, sifive_e_machine_set_revb); diff --git a/hw/riscv/sifive_u.c b/hw/riscv/sifive_u.c index fc5790b8ce..589ae72a59 100644 --- a/hw/riscv/sifive_u.c +++ b/hw/riscv/sifive_u.c @@ -528,7 +528,6 @@ static void sifive_u_machine_init(MachineState *machine) const MemMapEntry *memmap = sifive_u_memmap; SiFiveUState *s = RISCV_U_MACHINE(machine); MemoryRegion *system_memory = get_system_memory(); - MemoryRegion *main_mem = g_new(MemoryRegion, 1); MemoryRegion *flash0 = g_new(MemoryRegion, 1); target_ulong start_addr = memmap[SIFIVE_U_DEV_DRAM].base; target_ulong firmware_end_addr, kernel_start_addr; @@ -549,10 +548,8 @@ static void sifive_u_machine_init(MachineState *machine) qdev_realize(DEVICE(&s->soc), NULL, &error_abort); /* register RAM */ - memory_region_init_ram(main_mem, NULL, "riscv.sifive.u.ram", - machine->ram_size, &error_fatal); memory_region_add_subregion(system_memory, memmap[SIFIVE_U_DEV_DRAM].base, - main_mem); + machine->ram); /* register QSPI0 Flash */ memory_region_init_ram(flash0, NULL, "riscv.sifive.u.flash0", @@ -748,6 +745,7 @@ static void sifive_u_machine_class_init(ObjectClass *oc, void *data) mc->min_cpus = SIFIVE_U_MANAGEMENT_CPU_COUNT + 1; mc->default_cpu_type = SIFIVE_U_CPU; mc->default_cpus = mc->min_cpus; + mc->default_ram_id = "riscv.sifive.u.ram"; object_class_property_add_bool(oc, "start-in-flash", sifive_u_machine_get_start_in_flash, @@ -813,7 +811,6 @@ static void sifive_u_soc_realize(DeviceState *dev, Error **errp) MemoryRegion *mask_rom = g_new(MemoryRegion, 1); MemoryRegion *l2lim_mem = g_new(MemoryRegion, 1); char *plic_hart_config; - size_t plic_hart_config_len; int i, j; NICInfo *nd = &nd_table[0]; @@ -854,18 +851,7 @@ static void sifive_u_soc_realize(DeviceState *dev, Error **errp) l2lim_mem); /* create PLIC hart topology configuration string */ - plic_hart_config_len = (strlen(SIFIVE_U_PLIC_HART_CONFIG) + 1) * - ms->smp.cpus; - plic_hart_config = g_malloc0(plic_hart_config_len); - for (i = 0; i < ms->smp.cpus; i++) { - if (i != 0) { - strncat(plic_hart_config, "," SIFIVE_U_PLIC_HART_CONFIG, - plic_hart_config_len); - } else { - 
strncat(plic_hart_config, "M", plic_hart_config_len); - } - plic_hart_config_len -= (strlen(SIFIVE_U_PLIC_HART_CONFIG) + 1); - } + plic_hart_config = riscv_plic_hart_config_string(ms->smp.cpus); /* MMIO */ s->plic = sifive_plic_create(memmap[SIFIVE_U_DEV_PLIC].base, diff --git a/hw/riscv/spike.c b/hw/riscv/spike.c index 79ae355ae2..288d69cd9f 100644 --- a/hw/riscv/spike.c +++ b/hw/riscv/spike.c @@ -180,7 +180,6 @@ static void spike_board_init(MachineState *machine) const MemMapEntry *memmap = spike_memmap; SpikeState *s = SPIKE_MACHINE(machine); MemoryRegion *system_memory = get_system_memory(); - MemoryRegion *main_mem = g_new(MemoryRegion, 1); MemoryRegion *mask_rom = g_new(MemoryRegion, 1); target_ulong firmware_end_addr, kernel_start_addr; uint32_t fdt_load_addr; @@ -239,10 +238,8 @@ static void spike_board_init(MachineState *machine) } /* register system main memory (actual RAM) */ - memory_region_init_ram(main_mem, NULL, "riscv.spike.ram", - machine->ram_size, &error_fatal); memory_region_add_subregion(system_memory, memmap[SPIKE_DRAM].base, - main_mem); + machine->ram); /* create device tree */ create_fdt(s, memmap, machine->ram_size, machine->kernel_cmdline, @@ -326,6 +323,7 @@ static void spike_machine_class_init(ObjectClass *oc, void *data) mc->cpu_index_to_instance_props = riscv_numa_cpu_index_to_props; mc->get_default_cpu_node_id = riscv_numa_get_default_cpu_node_id; mc->numa_mem_supported = true; + mc->default_ram_id = "riscv.spike.ram"; } static const TypeInfo spike_machine_typeinfo = { diff --git a/hw/riscv/virt.c b/hw/riscv/virt.c index ec0cb69b8c..3af074148e 100644 --- a/hw/riscv/virt.c +++ b/hw/riscv/virt.c @@ -748,30 +748,11 @@ static FWCfgState *create_fw_cfg(const MachineState *mc) return fw_cfg; } -/* - * Return the per-socket PLIC hart topology configuration string - * (caller must free with g_free()) - */ -static char *plic_hart_config_string(int hart_count) -{ - g_autofree const char **vals = g_new(const char *, hart_count + 1); - int i; - - for (i = 0; i < hart_count; i++) { - vals[i] = VIRT_PLIC_HART_CONFIG; - } - vals[i] = NULL; - - /* g_strjoinv() obliges us to cast away const here */ - return g_strjoinv(",", (char **)vals); -} - static void virt_machine_init(MachineState *machine) { const MemMapEntry *memmap = virt_memmap; RISCVVirtState *s = RISCV_VIRT_MACHINE(machine); MemoryRegion *system_memory = get_system_memory(); - MemoryRegion *main_mem = g_new(MemoryRegion, 1); MemoryRegion *mask_rom = g_new(MemoryRegion, 1); char *plic_hart_config, *soc_name; target_ulong start_addr = memmap[VIRT_DRAM].base; @@ -840,7 +821,7 @@ static void virt_machine_init(MachineState *machine) } /* Per-socket PLIC hart topology configuration string */ - plic_hart_config = plic_hart_config_string(hart_count); + plic_hart_config = riscv_plic_hart_config_string(hart_count); /* Per-socket PLIC */ s->plic[i] = sifive_plic_create( @@ -890,10 +871,8 @@ static void virt_machine_init(MachineState *machine) } /* register system main memory (actual RAM) */ - memory_region_init_ram(main_mem, NULL, "riscv_virt_board.ram", - machine->ram_size, &error_fatal); memory_region_add_subregion(system_memory, memmap[VIRT_DRAM].base, - main_mem); + machine->ram); /* create device tree */ create_fdt(s, memmap, machine->ram_size, machine->kernel_cmdline, @@ -1032,6 +1011,7 @@ static void virt_machine_class_init(ObjectClass *oc, void *data) mc->cpu_index_to_instance_props = riscv_numa_cpu_index_to_props; mc->get_default_cpu_node_id = riscv_numa_get_default_cpu_node_id; mc->numa_mem_supported = true; + 
mc->default_ram_id = "riscv_virt_board.ram"; machine_class_allow_dynamic_sysbus_dev(mc, TYPE_RAMFB_DEVICE); diff --git a/hw/s390x/ap-bridge.c b/hw/s390x/ap-bridge.c index 8bcf8ece9d..ef8fa2b15b 100644 --- a/hw/s390x/ap-bridge.c +++ b/hw/s390x/ap-bridge.c @@ -55,7 +55,7 @@ void s390_init_ap(void) sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); /* Create bus on bridge device */ - bus = qbus_create(TYPE_AP_BUS, dev, TYPE_AP_BUS); + bus = qbus_new(TYPE_AP_BUS, dev, TYPE_AP_BUS); /* Enable hotplugging */ qbus_set_hotplug_handler(bus, OBJECT(dev)); diff --git a/hw/s390x/css-bridge.c b/hw/s390x/css-bridge.c index 191b29f077..4017081d49 100644 --- a/hw/s390x/css-bridge.c +++ b/hw/s390x/css-bridge.c @@ -106,7 +106,7 @@ VirtualCssBus *virtual_css_bus_init(void) sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); /* Create bus on bridge device */ - bus = qbus_create(TYPE_VIRTUAL_CSS_BUS, dev, "virtual-css"); + bus = qbus_new(TYPE_VIRTUAL_CSS_BUS, dev, "virtual-css"); cbus = VIRTUAL_CSS_BUS(bus); /* Enable hotplugging */ diff --git a/hw/s390x/event-facility.c b/hw/s390x/event-facility.c index ed92ce510d..6fa47b889c 100644 --- a/hw/s390x/event-facility.c +++ b/hw/s390x/event-facility.c @@ -427,8 +427,8 @@ static void init_event_facility(Object *obj) sclp_event_set_allow_all_mask_sizes); /* Spawn a new bus for SCLP events */ - qbus_create_inplace(&event_facility->sbus, sizeof(event_facility->sbus), - TYPE_SCLP_EVENTS_BUS, sdev, NULL); + qbus_init(&event_facility->sbus, sizeof(event_facility->sbus), + TYPE_SCLP_EVENTS_BUS, sdev, NULL); object_initialize_child(obj, TYPE_SCLP_QUIESCE, &event_facility->quiesce, diff --git a/hw/s390x/ipl.c b/hw/s390x/ipl.c index 1821c6faee..7ddca0127f 100644 --- a/hw/s390x/ipl.c +++ b/hw/s390x/ipl.c @@ -38,6 +38,7 @@ #define KERN_IMAGE_START 0x010000UL #define LINUX_MAGIC_ADDR 0x010008UL #define KERN_PARM_AREA 0x010480UL +#define KERN_PARM_AREA_SIZE 0x000380UL #define INITRD_START 0x800000UL #define INITRD_PARM_START 0x010408UL #define PARMFILE_START 0x001000UL @@ -190,10 +191,19 @@ static void s390_ipl_realize(DeviceState *dev, Error **errp) * loader) and it won't work. For this case we force it to 0x10000, too. */ if (pentry == KERN_IMAGE_START || pentry == 0x800) { - char *parm_area = rom_ptr(KERN_PARM_AREA, strlen(ipl->cmdline) + 1); + size_t cmdline_size = strlen(ipl->cmdline) + 1; + char *parm_area = rom_ptr(KERN_PARM_AREA, cmdline_size); + ipl->start_addr = KERN_IMAGE_START; /* Overwrite parameters in the kernel image, which are "rom" */ if (parm_area) { + if (cmdline_size > KERN_PARM_AREA_SIZE) { + error_setg(errp, + "kernel command line exceeds maximum size: %zu > %lu", + cmdline_size, KERN_PARM_AREA_SIZE); + return; + } + strcpy(parm_area, ipl->cmdline); } } else { diff --git a/hw/s390x/s390-pci-bus.c b/hw/s390x/s390-pci-bus.c index 6c0225c3a0..1b51a72838 100644 --- a/hw/s390x/s390-pci-bus.c +++ b/hw/s390x/s390-pci-bus.c @@ -813,7 +813,7 @@ static void s390_pcihost_realize(DeviceState *dev, Error **errp) qbus_set_hotplug_handler(bus, OBJECT(dev)); phb->bus = b; - s->bus = S390_PCI_BUS(qbus_create(TYPE_S390_PCI_BUS, dev, NULL)); + s->bus = S390_PCI_BUS(qbus_new(TYPE_S390_PCI_BUS, dev, NULL)); qbus_set_hotplug_handler(BUS(s->bus), OBJECT(dev)); s->iommu_table = g_hash_table_new_full(g_int64_hash, g_int64_equal, @@ -1163,8 +1163,7 @@ static void s390_pci_enumerate_bridge(PCIBus *bus, PCIDevice *pdev, } /* Assign numbers to all child bridges. The last is the highest number. 
*/ - pci_for_each_device(sec_bus, pci_bus_num(sec_bus), - s390_pci_enumerate_bridge, s); + pci_for_each_device_under_bus(sec_bus, s390_pci_enumerate_bridge, s); pci_default_write_config(pdev, PCI_SUBORDINATE_BUS, s->bus_no, 1); } @@ -1193,7 +1192,7 @@ static void s390_pcihost_reset(DeviceState *dev) * on every system reset, we also have to reassign numbers. */ s->bus_no = 0; - pci_for_each_device(bus, pci_bus_num(bus), s390_pci_enumerate_bridge, s); + pci_for_each_device_under_bus(bus, s390_pci_enumerate_bridge, s); } static void s390_pcihost_class_init(ObjectClass *klass, void *data) diff --git a/hw/s390x/s390-virtio-ccw.c b/hw/s390x/s390-virtio-ccw.c index 61aeccb163..653587ea62 100644 --- a/hw/s390x/s390-virtio-ccw.c +++ b/hw/s390x/s390-virtio-ccw.c @@ -814,6 +814,7 @@ static void ccw_machine_6_1_class_options(MachineClass *mc) { ccw_machine_6_2_class_options(mc); compat_props_add(mc->compat_props, hw_compat_6_1, hw_compat_6_1_len); + mc->smp_props.prefer_sockets = true; } DEFINE_CCW_MACHINE(6_1, "6.1", false); diff --git a/hw/s390x/virtio-ccw.c b/hw/s390x/virtio-ccw.c index 6a2df1c1e9..c845a92c3a 100644 --- a/hw/s390x/virtio-ccw.c +++ b/hw/s390x/virtio-ccw.c @@ -1261,8 +1261,7 @@ static void virtio_ccw_bus_new(VirtioBusState *bus, size_t bus_size, DeviceState *qdev = DEVICE(dev); char virtio_bus_name[] = "virtio-bus"; - qbus_create_inplace(bus, bus_size, TYPE_VIRTIO_CCW_BUS, - qdev, virtio_bus_name); + qbus_init(bus, bus_size, TYPE_VIRTIO_CCW_BUS, qdev, virtio_bus_name); } static void virtio_ccw_bus_class_init(ObjectClass *klass, void *data) diff --git a/hw/scsi/esp-pci.c b/hw/scsi/esp-pci.c index 9db10b1a48..dac054aeed 100644 --- a/hw/scsi/esp-pci.c +++ b/hw/scsi/esp-pci.c @@ -388,7 +388,7 @@ static void esp_pci_scsi_realize(PCIDevice *dev, Error **errp) pci_register_bar(dev, 0, PCI_BASE_ADDRESS_SPACE_IO, &pci->io); s->irq = pci_allocate_irq(dev); - scsi_bus_new(&s->bus, sizeof(s->bus), d, &esp_pci_scsi_info, NULL); + scsi_bus_init(&s->bus, sizeof(s->bus), d, &esp_pci_scsi_info); } static void esp_pci_scsi_exit(PCIDevice *d) diff --git a/hw/scsi/esp.c b/hw/scsi/esp.c index 4ac2114788..84f935b549 100644 --- a/hw/scsi/esp.c +++ b/hw/scsi/esp.c @@ -204,11 +204,6 @@ static int esp_select(ESPState *s) s->ti_size = 0; fifo8_reset(&s->fifo); - if (s->current_req) { - /* Started a new command before the old one finished. Cancel it. */ - scsi_req_cancel(s->current_req); - } - s->current_dev = scsi_device_find(&s->bus, 0, target, 0); if (!s->current_dev) { /* No such drive */ @@ -235,6 +230,11 @@ static uint32_t get_cmd(ESPState *s, uint32_t maxlen) uint32_t dmalen, n; int target; + if (s->current_req) { + /* Started a new command before the old one finished. Cancel it. 
*/ + scsi_req_cancel(s->current_req); + } + target = s->wregs[ESP_WBUSID] & BUSID_DID; if (s->dma) { dmalen = MIN(esp_get_tc(s), maxlen); @@ -1348,7 +1348,7 @@ static void sysbus_esp_realize(DeviceState *dev, Error **errp) qdev_init_gpio_in(dev, sysbus_esp_gpio_demux, 2); - scsi_bus_new(&s->bus, sizeof(s->bus), dev, &esp_scsi_info, NULL); + scsi_bus_init(&s->bus, sizeof(s->bus), dev, &esp_scsi_info); } static void sysbus_esp_hard_reset(DeviceState *dev) diff --git a/hw/scsi/lsi53c895a.c b/hw/scsi/lsi53c895a.c index e2c19180a0..85e907a785 100644 --- a/hw/scsi/lsi53c895a.c +++ b/hw/scsi/lsi53c895a.c @@ -2309,7 +2309,7 @@ static void lsi_scsi_realize(PCIDevice *dev, Error **errp) pci_register_bar(dev, 2, PCI_BASE_ADDRESS_SPACE_MEMORY, &s->ram_io); QTAILQ_INIT(&s->queue); - scsi_bus_new(&s->bus, sizeof(s->bus), d, &lsi_scsi_info, NULL); + scsi_bus_init(&s->bus, sizeof(s->bus), d, &lsi_scsi_info); } static void lsi_scsi_exit(PCIDevice *dev) diff --git a/hw/scsi/megasas.c b/hw/scsi/megasas.c index 8f2389d2c6..4ff51221d4 100644 --- a/hw/scsi/megasas.c +++ b/hw/scsi/megasas.c @@ -2416,8 +2416,7 @@ static void megasas_scsi_realize(PCIDevice *dev, Error **errp) s->frames[i].state = s; } - scsi_bus_new(&s->bus, sizeof(s->bus), DEVICE(dev), - &megasas_scsi_info, NULL); + scsi_bus_init(&s->bus, sizeof(s->bus), DEVICE(dev), &megasas_scsi_info); } static Property megasas_properties_gen1[] = { diff --git a/hw/scsi/mptsas.c b/hw/scsi/mptsas.c index db3219e7d2..f6c7765544 100644 --- a/hw/scsi/mptsas.c +++ b/hw/scsi/mptsas.c @@ -1315,7 +1315,7 @@ static void mptsas_scsi_realize(PCIDevice *dev, Error **errp) s->request_bh = qemu_bh_new(mptsas_fetch_requests, s); - scsi_bus_new(&s->bus, sizeof(s->bus), &dev->qdev, &mptsas_scsi_info, NULL); + scsi_bus_init(&s->bus, sizeof(s->bus), &dev->qdev, &mptsas_scsi_info); } static void mptsas_scsi_uninit(PCIDevice *dev) diff --git a/hw/scsi/scsi-bus.c b/hw/scsi/scsi-bus.c index 2a0a98cac9..77325d8cc7 100644 --- a/hw/scsi/scsi-bus.c +++ b/hw/scsi/scsi-bus.c @@ -134,10 +134,10 @@ void scsi_device_unit_attention_reported(SCSIDevice *s) } /* Create a scsi bus, and attach devices to it. 
*/ -void scsi_bus_new(SCSIBus *bus, size_t bus_size, DeviceState *host, - const SCSIBusInfo *info, const char *bus_name) +void scsi_bus_init_named(SCSIBus *bus, size_t bus_size, DeviceState *host, + const SCSIBusInfo *info, const char *bus_name) { - qbus_create_inplace(bus, bus_size, TYPE_SCSI_BUS, host, bus_name); + qbus_init(bus, bus_size, TYPE_SCSI_BUS, host, bus_name); bus->busnr = next_scsi_bus++; bus->info = info; qbus_set_bus_hotplug_handler(BUS(bus)); diff --git a/hw/scsi/scsi-disk.c b/hw/scsi/scsi-disk.c index e8a547dbb7..d4914178ea 100644 --- a/hw/scsi/scsi-disk.c +++ b/hw/scsi/scsi-disk.c @@ -1087,6 +1087,7 @@ static int mode_sense_page(SCSIDiskState *s, int page, uint8_t **p_outbuf, uint8_t *p = *p_outbuf + 2; int length; + assert(page < ARRAY_SIZE(mode_sense_valid)); if ((mode_sense_valid[page] & (1 << s->qdev.type)) == 0) { return -1; } @@ -1428,6 +1429,11 @@ static int scsi_disk_check_mode_select(SCSIDiskState *s, int page, return -1; } + /* MODE_PAGE_ALLS is only valid for MODE SENSE commands */ + if (page == MODE_PAGE_ALLS) { + return -1; + } + p = mode_current; memset(mode_current, 0, inlen + 2); len = mode_sense_page(s, page, &p, 0); diff --git a/hw/scsi/scsi-generic.c b/hw/scsi/scsi-generic.c index 665baf900e..0306ccc7b1 100644 --- a/hw/scsi/scsi-generic.c +++ b/hw/scsi/scsi-generic.c @@ -180,7 +180,7 @@ static int scsi_handle_inquiry_reply(SCSIGenericReq *r, SCSIDevice *s, int len) page = r->req.cmd.buf[2]; if (page == 0xb0) { uint64_t max_transfer = blk_get_max_hw_transfer(s->conf.blk); - uint32_t max_iov = blk_get_max_iov(s->conf.blk); + uint32_t max_iov = blk_get_max_hw_iov(s->conf.blk); assert(max_transfer); max_transfer = MIN_NON_ZERO(max_transfer, max_iov * qemu_real_host_page_size) diff --git a/hw/scsi/spapr_vscsi.c b/hw/scsi/spapr_vscsi.c index c210262484..a07a8e1523 100644 --- a/hw/scsi/spapr_vscsi.c +++ b/hw/scsi/spapr_vscsi.c @@ -1216,8 +1216,7 @@ static void spapr_vscsi_realize(SpaprVioDevice *dev, Error **errp) dev->crq.SendFunc = vscsi_do_crq; - scsi_bus_new(&s->bus, sizeof(s->bus), DEVICE(dev), - &vscsi_scsi_info, NULL); + scsi_bus_init(&s->bus, sizeof(s->bus), DEVICE(dev), &vscsi_scsi_info); /* ibmvscsi SCSI bus does not allow hotplug. 
*/ qbus_set_hotplug_handler(BUS(&s->bus), NULL); diff --git a/hw/scsi/virtio-scsi.c b/hw/scsi/virtio-scsi.c index 6d80730287..51fd09522a 100644 --- a/hw/scsi/virtio-scsi.c +++ b/hw/scsi/virtio-scsi.c @@ -1019,8 +1019,8 @@ static void virtio_scsi_device_realize(DeviceState *dev, Error **errp) return; } - scsi_bus_new(&s->bus, sizeof(s->bus), dev, - &virtio_scsi_scsi_info, vdev->bus_name); + scsi_bus_init_named(&s->bus, sizeof(s->bus), dev, + &virtio_scsi_scsi_info, vdev->bus_name); /* override default SCSI bus hotplug-handler, with virtio-scsi's one */ qbus_set_hotplug_handler(BUS(&s->bus), OBJECT(dev)); diff --git a/hw/scsi/vmw_pvscsi.c b/hw/scsi/vmw_pvscsi.c index 1f30cb020a..cd76bd67ab 100644 --- a/hw/scsi/vmw_pvscsi.c +++ b/hw/scsi/vmw_pvscsi.c @@ -1180,8 +1180,7 @@ pvscsi_realizefn(PCIDevice *pci_dev, Error **errp) s->completion_worker = qemu_bh_new(pvscsi_process_completion_queue, s); - scsi_bus_new(&s->bus, sizeof(s->bus), DEVICE(pci_dev), - &pvscsi_scsi_info, NULL); + scsi_bus_init(&s->bus, sizeof(s->bus), DEVICE(pci_dev), &pvscsi_scsi_info); /* override default SCSI bus hotplug-handler, with pvscsi's one */ qbus_set_hotplug_handler(BUS(&s->bus), OBJECT(s)); pvscsi_reset_state(s); diff --git a/hw/sd/allwinner-sdhost.c b/hw/sd/allwinner-sdhost.c index bea6d97ef8..9166d6638d 100644 --- a/hw/sd/allwinner-sdhost.c +++ b/hw/sd/allwinner-sdhost.c @@ -738,8 +738,8 @@ static void allwinner_sdhost_init(Object *obj) { AwSdHostState *s = AW_SDHOST(obj); - qbus_create_inplace(&s->sdbus, sizeof(s->sdbus), - TYPE_AW_SDHOST_BUS, DEVICE(s), "sd-bus"); + qbus_init(&s->sdbus, sizeof(s->sdbus), + TYPE_AW_SDHOST_BUS, DEVICE(s), "sd-bus"); memory_region_init_io(&s->iomem, obj, &allwinner_sdhost_ops, s, TYPE_AW_SDHOST, 4 * KiB); diff --git a/hw/sd/aspeed_sdhci.c b/hw/sd/aspeed_sdhci.c index 3299844de6..df1bdf1fa4 100644 --- a/hw/sd/aspeed_sdhci.c +++ b/hw/sd/aspeed_sdhci.c @@ -14,6 +14,7 @@ #include "hw/irq.h" #include "migration/vmstate.h" #include "hw/qdev-properties.h" +#include "trace.h" #define ASPEED_SDHCI_INFO 0x00 #define ASPEED_SDHCI_INFO_SLOT1 (1 << 17) @@ -60,6 +61,8 @@ static uint64_t aspeed_sdhci_read(void *opaque, hwaddr addr, unsigned int size) } } + trace_aspeed_sdhci_read(addr, size, (uint64_t) val); + return (uint64_t)val; } @@ -68,6 +71,8 @@ static void aspeed_sdhci_write(void *opaque, hwaddr addr, uint64_t val, { AspeedSDHCIState *sdhci = opaque; + trace_aspeed_sdhci_write(addr, size, val); + switch (addr) { case ASPEED_SDHCI_INFO: /* The RESET bit automatically clears. 
*/ diff --git a/hw/sd/bcm2835_sdhost.c b/hw/sd/bcm2835_sdhost.c index 50f5fdb88b..088a7ac6ed 100644 --- a/hw/sd/bcm2835_sdhost.c +++ b/hw/sd/bcm2835_sdhost.c @@ -403,8 +403,8 @@ static void bcm2835_sdhost_init(Object *obj) { BCM2835SDHostState *s = BCM2835_SDHOST(obj); - qbus_create_inplace(&s->sdbus, sizeof(s->sdbus), - TYPE_BCM2835_SDHOST_BUS, DEVICE(s), "sd-bus"); + qbus_init(&s->sdbus, sizeof(s->sdbus), + TYPE_BCM2835_SDHOST_BUS, DEVICE(s), "sd-bus"); memory_region_init_io(&s->iomem, obj, &bcm2835_sdhost_ops, s, TYPE_BCM2835_SDHOST, 0x1000); diff --git a/hw/sd/meson.build b/hw/sd/meson.build index f1ce357a3b..807ca07b7c 100644 --- a/hw/sd/meson.build +++ b/hw/sd/meson.build @@ -9,4 +9,5 @@ softmmu_ss.add(when: 'CONFIG_PXA2XX', if_true: files('pxa2xx_mmci.c')) softmmu_ss.add(when: 'CONFIG_RASPI', if_true: files('bcm2835_sdhost.c')) softmmu_ss.add(when: 'CONFIG_ASPEED_SOC', if_true: files('aspeed_sdhci.c')) softmmu_ss.add(when: 'CONFIG_ALLWINNER_H3', if_true: files('allwinner-sdhost.c')) +softmmu_ss.add(when: 'CONFIG_NPCM7XX', if_true: files('npcm7xx_sdhci.c')) softmmu_ss.add(when: 'CONFIG_CADENCE_SDHCI', if_true: files('cadence_sdhci.c')) diff --git a/hw/sd/npcm7xx_sdhci.c b/hw/sd/npcm7xx_sdhci.c new file mode 100644 index 0000000000..ef503365df --- /dev/null +++ b/hw/sd/npcm7xx_sdhci.c @@ -0,0 +1,182 @@ +/* + * NPCM7xx SD-3.0 / eMMC-4.51 Host Controller + * + * Copyright (c) 2021 Google LLC + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ */ + +#include "qemu/osdep.h" + +#include "hw/sd/npcm7xx_sdhci.h" +#include "migration/vmstate.h" +#include "sdhci-internal.h" +#include "qemu/log.h" + +static uint64_t npcm7xx_sdhci_read(void *opaque, hwaddr addr, unsigned int size) +{ + NPCM7xxSDHCIState *s = opaque; + uint64_t val = 0; + + switch (addr) { + case NPCM7XX_PRSTVALS_0: + case NPCM7XX_PRSTVALS_1: + case NPCM7XX_PRSTVALS_2: + case NPCM7XX_PRSTVALS_3: + case NPCM7XX_PRSTVALS_4: + case NPCM7XX_PRSTVALS_5: + val = s->regs.prstvals[(addr - NPCM7XX_PRSTVALS_0) / 2]; + break; + case NPCM7XX_BOOTTOCTRL: + val = s->regs.boottoctrl; + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, "SDHCI read of nonexistent reg: 0x%02" + HWADDR_PRIx, addr); + break; + } + + return val; +} + +static void npcm7xx_sdhci_write(void *opaque, hwaddr addr, uint64_t val, + unsigned int size) +{ + NPCM7xxSDHCIState *s = opaque; + + switch (addr) { + case NPCM7XX_BOOTTOCTRL: + s->regs.boottoctrl = val; + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, "SDHCI write of nonexistent reg: 0x%02" + HWADDR_PRIx, addr); + break; + } +} + +static bool npcm7xx_sdhci_check_mem_op(void *opaque, hwaddr addr, + unsigned size, bool is_write, + MemTxAttrs attrs) +{ + switch (addr) { + case NPCM7XX_PRSTVALS_0: + case NPCM7XX_PRSTVALS_1: + case NPCM7XX_PRSTVALS_2: + case NPCM7XX_PRSTVALS_3: + case NPCM7XX_PRSTVALS_4: + case NPCM7XX_PRSTVALS_5: + /* RO Word */ + return !is_write && size == 2; + case NPCM7XX_BOOTTOCTRL: + /* R/W Dword */ + return size == 4; + default: + return false; + } +} + +static const MemoryRegionOps npcm7xx_sdhci_ops = { + .read = npcm7xx_sdhci_read, + .write = npcm7xx_sdhci_write, + .endianness = DEVICE_NATIVE_ENDIAN, + .valid = { + .min_access_size = 1, + .max_access_size = 4, + .unaligned = false, + .accepts = npcm7xx_sdhci_check_mem_op, + }, +}; + +static void npcm7xx_sdhci_realize(DeviceState *dev, Error **errp) +{ + NPCM7xxSDHCIState *s = NPCM7XX_SDHCI(dev); + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + SysBusDevice *sbd_sdhci = SYS_BUS_DEVICE(&s->sdhci); + + memory_region_init(&s->container, OBJECT(s), + "npcm7xx.sdhci-container", 0x1000); + sysbus_init_mmio(sbd, &s->container); + + memory_region_init_io(&s->iomem, OBJECT(s), &npcm7xx_sdhci_ops, s, + TYPE_NPCM7XX_SDHCI, NPCM7XX_SDHCI_REGSIZE); + memory_region_add_subregion_overlap(&s->container, NPCM7XX_PRSTVALS, + &s->iomem, 1); + + sysbus_realize(sbd_sdhci, errp); + memory_region_add_subregion(&s->container, 0, + sysbus_mmio_get_region(sbd_sdhci, 0)); + + /* propagate irq and "sd-bus" from generic-sdhci */ + sysbus_pass_irq(sbd, sbd_sdhci); + s->bus = qdev_get_child_bus(DEVICE(sbd_sdhci), "sd-bus"); + + /* Set the read only preset values. 
*/ + memset(s->regs.prstvals, 0, sizeof(s->regs.prstvals)); + s->regs.prstvals[0] = NPCM7XX_PRSTVALS_0_RESET; + s->regs.prstvals[1] = NPCM7XX_PRSTVALS_1_RESET; + s->regs.prstvals[3] = NPCM7XX_PRSTVALS_3_RESET; +} + +static void npcm7xx_sdhci_reset(DeviceState *dev) +{ + NPCM7xxSDHCIState *s = NPCM7XX_SDHCI(dev); + device_cold_reset(DEVICE(&s->sdhci)); + s->regs.boottoctrl = 0; + + s->sdhci.prnsts = NPCM7XX_PRSNTS_RESET; + s->sdhci.blkgap = NPCM7XX_BLKGAP_RESET; + s->sdhci.capareg = NPCM7XX_CAPAB_RESET; + s->sdhci.maxcurr = NPCM7XX_MAXCURR_RESET; + s->sdhci.version = NPCM7XX_HCVER_RESET; +} + +static const VMStateDescription vmstate_npcm7xx_sdhci = { + .name = TYPE_NPCM7XX_SDHCI, + .version_id = 0, + .fields = (VMStateField[]) { + VMSTATE_UINT32(regs.boottoctrl, NPCM7xxSDHCIState), + VMSTATE_END_OF_LIST(), + }, +}; + +static void npcm7xx_sdhci_class_init(ObjectClass *classp, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(classp); + + dc->desc = "NPCM7xx SD/eMMC Host Controller"; + dc->realize = npcm7xx_sdhci_realize; + dc->reset = npcm7xx_sdhci_reset; + dc->vmsd = &vmstate_npcm7xx_sdhci; +} + +static void npcm7xx_sdhci_instance_init(Object *obj) +{ + NPCM7xxSDHCIState *s = NPCM7XX_SDHCI(obj); + + object_initialize_child(OBJECT(s), "generic-sdhci", &s->sdhci, + TYPE_SYSBUS_SDHCI); +} + +static TypeInfo npcm7xx_sdhci_info = { + .name = TYPE_NPCM7XX_SDHCI, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(NPCM7xxSDHCIState), + .instance_init = npcm7xx_sdhci_instance_init, + .class_init = npcm7xx_sdhci_class_init, +}; + +static void npcm7xx_sdhci_register_types(void) +{ + type_register_static(&npcm7xx_sdhci_info); +} + +type_init(npcm7xx_sdhci_register_types) diff --git a/hw/sd/pl181.c b/hw/sd/pl181.c index 960f155098..5e554bd467 100644 --- a/hw/sd/pl181.c +++ b/hw/sd/pl181.c @@ -506,8 +506,7 @@ static void pl181_init(Object *obj) qdev_init_gpio_out_named(dev, &s->card_readonly, "card-read-only", 1); qdev_init_gpio_out_named(dev, &s->card_inserted, "card-inserted", 1); - qbus_create_inplace(&s->sdbus, sizeof(s->sdbus), - TYPE_PL181_BUS, dev, "sd-bus"); + qbus_init(&s->sdbus, sizeof(s->sdbus), TYPE_PL181_BUS, dev, "sd-bus"); } static void pl181_class_init(ObjectClass *klass, void *data) diff --git a/hw/sd/pxa2xx_mmci.c b/hw/sd/pxa2xx_mmci.c index 3dd2fc7a83..124fbf8bbd 100644 --- a/hw/sd/pxa2xx_mmci.c +++ b/hw/sd/pxa2xx_mmci.c @@ -560,8 +560,8 @@ static void pxa2xx_mmci_instance_init(Object *obj) qdev_init_gpio_out_named(dev, &s->rx_dma, "rx-dma", 1); qdev_init_gpio_out_named(dev, &s->tx_dma, "tx-dma", 1); - qbus_create_inplace(&s->sdbus, sizeof(s->sdbus), - TYPE_PXA2XX_MMCI_BUS, DEVICE(obj), "sd-bus"); + qbus_init(&s->sdbus, sizeof(s->sdbus), + TYPE_PXA2XX_MMCI_BUS, DEVICE(obj), "sd-bus"); } static void pxa2xx_mmci_class_init(ObjectClass *klass, void *data) diff --git a/hw/sd/sdhci.c b/hw/sd/sdhci.c index 5b8678110b..c9dc065cc5 100644 --- a/hw/sd/sdhci.c +++ b/hw/sd/sdhci.c @@ -1337,8 +1337,7 @@ static void sdhci_init_readonly_registers(SDHCIState *s, Error **errp) void sdhci_initfn(SDHCIState *s) { - qbus_create_inplace(&s->sdbus, sizeof(s->sdbus), - TYPE_SDHCI_BUS, DEVICE(s), "sd-bus"); + qbus_init(&s->sdbus, sizeof(s->sdbus), TYPE_SDHCI_BUS, DEVICE(s), "sd-bus"); s->insert_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, sdhci_raise_insertion_irq, s); s->transfer_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, sdhci_data_transfer, s); diff --git a/hw/sd/ssi-sd.c b/hw/sd/ssi-sd.c index 97ee58e20c..e60854eeef 100644 --- a/hw/sd/ssi-sd.c +++ b/hw/sd/ssi-sd.c @@ -373,8 +373,7 @@ static void 
ssi_sd_realize(SSIPeripheral *d, Error **errp) DeviceState *carddev; DriveInfo *dinfo; - qbus_create_inplace(&s->sdbus, sizeof(s->sdbus), TYPE_SD_BUS, - DEVICE(d), "sd-bus"); + qbus_init(&s->sdbus, sizeof(s->sdbus), TYPE_SD_BUS, DEVICE(d), "sd-bus"); /* Create and plug in the sd card */ /* FIXME use a qdev drive property instead of drive_get_next() */ diff --git a/hw/sd/trace-events b/hw/sd/trace-events index 3cc2ef89ba..94a00557b2 100644 --- a/hw/sd/trace-events +++ b/hw/sd/trace-events @@ -68,3 +68,7 @@ pl181_fifo_push(uint32_t data) "FIFO push 0x%08" PRIx32 pl181_fifo_pop(uint32_t data) "FIFO pop 0x%08" PRIx32 pl181_fifo_transfer_complete(void) "FIFO transfer complete" pl181_data_engine_idle(void) "data engine idle" + +# aspeed_sdhci.c +aspeed_sdhci_read(uint64_t addr, uint32_t size, uint64_t data) "@0x%" PRIx64 " size %u: 0x%" PRIx64 +aspeed_sdhci_write(uint64_t addr, uint32_t size, uint64_t data) "@0x%" PRIx64 " size %u: 0x%" PRIx64 diff --git a/hw/sh4/r2d.c b/hw/sh4/r2d.c index 006010f30a..72759413f3 100644 --- a/hw/sh4/r2d.c +++ b/hw/sh4/r2d.c @@ -26,6 +26,7 @@ #include "qemu/osdep.h" #include "qemu/units.h" #include "qapi/error.h" +#include "qemu/error-report.h" #include "cpu.h" #include "hw/sysbus.h" #include "hw/sh4/sh.h" @@ -56,10 +57,10 @@ #define LINUX_LOAD_OFFSET 0x0800000 #define INITRD_LOAD_OFFSET 0x1800000 -#define PA_IRLMSK 0x00 -#define PA_POWOFF 0x30 -#define PA_VERREG 0x32 -#define PA_OUTPORT 0x36 +#define PA_IRLMSK 0x00 +#define PA_POWOFF 0x30 +#define PA_VERREG 0x32 +#define PA_OUTPORT 0x36 typedef struct { uint16_t bcr; @@ -96,38 +97,41 @@ enum r2d_fpga_irq { }; static const struct { short irl; uint16_t msk; } irqtab[NR_IRQS] = { - [CF_IDE] = { 1, 1<<9 }, - [CF_CD] = { 2, 1<<8 }, - [PCI_INTA] = { 9, 1<<14 }, - [PCI_INTB] = { 10, 1<<13 }, - [PCI_INTC] = { 3, 1<<12 }, - [PCI_INTD] = { 0, 1<<11 }, - [SM501] = { 4, 1<<10 }, - [KEY] = { 5, 1<<6 }, - [RTC_A] = { 6, 1<<5 }, - [RTC_T] = { 7, 1<<4 }, - [SDCARD] = { 8, 1<<7 }, - [EXT] = { 11, 1<<0 }, - [TP] = { 12, 1<<15 }, + [CF_IDE] = { 1, 1 << 9 }, + [CF_CD] = { 2, 1 << 8 }, + [PCI_INTA] = { 9, 1 << 14 }, + [PCI_INTB] = { 10, 1 << 13 }, + [PCI_INTC] = { 3, 1 << 12 }, + [PCI_INTD] = { 0, 1 << 11 }, + [SM501] = { 4, 1 << 10 }, + [KEY] = { 5, 1 << 6 }, + [RTC_A] = { 6, 1 << 5 }, + [RTC_T] = { 7, 1 << 4 }, + [SDCARD] = { 8, 1 << 7 }, + [EXT] = { 11, 1 << 0 }, + [TP] = { 12, 1 << 15 }, }; static void update_irl(r2d_fpga_t *fpga) { int i, irl = 15; - for (i = 0; i < NR_IRQS; i++) - if (fpga->irlmon & fpga->irlmsk & irqtab[i].msk) - if (irqtab[i].irl < irl) - irl = irqtab[i].irl; + for (i = 0; i < NR_IRQS; i++) { + if ((fpga->irlmon & fpga->irlmsk & irqtab[i].msk) && + irqtab[i].irl < irl) { + irl = irqtab[i].irl; + } + } qemu_set_irq(fpga->irl, irl ^ 15); } static void r2d_fpga_irq_set(void *opaque, int n, int level) { r2d_fpga_t *fpga = opaque; - if (level) + if (level) { fpga->irlmon |= irqtab[n].msk; - else + } else { fpga->irlmon &= ~irqtab[n].msk; + } update_irl(fpga); } @@ -306,7 +310,7 @@ static void r2d_init(MachineState *machine) /* NIC: rtl8139 on-board, and 2 slots. */ for (i = 0; i < nb_nics; i++) pci_nic_init_nofail(&nd_table[i], pci_bus, - "rtl8139", i==0 ? "2" : NULL); + "rtl8139", i == 0 ? 
"2" : NULL); /* USB keyboard */ usb_create_simple(usb_bus_find(-1), "usb-kbd"); @@ -321,8 +325,8 @@ static void r2d_init(MachineState *machine) SDRAM_BASE + LINUX_LOAD_OFFSET, INITRD_LOAD_OFFSET - LINUX_LOAD_OFFSET); if (kernel_size < 0) { - fprintf(stderr, "qemu: could not load kernel '%s'\n", kernel_filename); - exit(1); + error_report("qemu: could not load kernel '%s'", kernel_filename); + exit(1); } /* initialization which should be done by firmware */ @@ -330,7 +334,8 @@ static void r2d_init(MachineState *machine) MEMTXATTRS_UNSPECIFIED, NULL); /* cs3 SDRAM */ address_space_stw(&address_space_memory, SH7750_BCR2, 3 << (3 * 2), MEMTXATTRS_UNSPECIFIED, NULL); /* cs3 32bit */ - reset_info->vector = (SDRAM_BASE + LINUX_LOAD_OFFSET) | 0xa0000000; /* Start from P2 area */ + /* Start from P2 area */ + reset_info->vector = (SDRAM_BASE + LINUX_LOAD_OFFSET) | 0xa0000000; } if (initrd_filename) { @@ -341,8 +346,8 @@ static void r2d_init(MachineState *machine) SDRAM_SIZE - INITRD_LOAD_OFFSET); if (initrd_size < 0) { - fprintf(stderr, "qemu: could not load initrd '%s'\n", initrd_filename); - exit(1); + error_report("qemu: could not load initrd '%s'", initrd_filename); + exit(1); } /* initialization which should be done by firmware */ @@ -352,8 +357,10 @@ static void r2d_init(MachineState *machine) } if (kernel_cmdline) { - /* I see no evidence that this .kernel_cmdline buffer requires - NUL-termination, so using strncpy should be ok. */ + /* + * I see no evidence that this .kernel_cmdline buffer requires + * NUL-termination, so using strncpy should be ok. + */ strncpy(boot_params.kernel_cmdline, kernel_cmdline, sizeof(boot_params.kernel_cmdline)); } diff --git a/hw/sh4/sh7750.c b/hw/sh4/sh7750.c index d53a436d8c..43dfb6497b 100644 --- a/hw/sh4/sh7750.c +++ b/hw/sh4/sh7750.c @@ -24,14 +24,19 @@ */ #include "qemu/osdep.h" +#include "qapi/error.h" +#include "hw/sysbus.h" #include "hw/irq.h" #include "hw/sh4/sh.h" #include "sysemu/sysemu.h" +#include "hw/qdev-properties.h" +#include "hw/qdev-properties-system.h" #include "sh7750_regs.h" #include "sh7750_regnames.h" #include "hw/sh4/sh_intc.h" #include "hw/timer/tmu012.h" #include "exec/exec-all.h" +#include "trace.h" #define NB_DEVICES 4 @@ -60,17 +65,17 @@ typedef struct SH7750State { uint16_t gpioic; uint32_t pctra; uint32_t pctrb; - uint16_t portdira; /* Cached */ - uint16_t portpullupa; /* Cached */ - uint16_t portdirb; /* Cached */ - uint16_t portpullupb; /* Cached */ + uint16_t portdira; /* Cached */ + uint16_t portpullupa; /* Cached */ + uint16_t portdirb; /* Cached */ + uint16_t portpullupb; /* Cached */ uint16_t pdtra; uint16_t pdtrb; - uint16_t periph_pdtra; /* Imposed by the peripherals */ - uint16_t periph_portdira; /* Direction seen from the peripherals */ - uint16_t periph_pdtrb; /* Imposed by the peripherals */ - uint16_t periph_portdirb; /* Direction seen from the peripherals */ - sh7750_io_device *devices[NB_DEVICES]; /* External peripherals */ + uint16_t periph_pdtra; /* Imposed by the peripherals */ + uint16_t periph_portdira; /* Direction seen from the peripherals */ + uint16_t periph_pdtrb; /* Imposed by the peripherals */ + uint16_t periph_portdirb; /* Direction seen from the peripherals */ + sh7750_io_device *devices[NB_DEVICES]; /* External peripherals */ /* Cache */ uint32_t ccr; @@ -78,143 +83,145 @@ typedef struct SH7750State { struct intc_desc intc; } SH7750State; -static inline int has_bcr3_and_bcr4(SH7750State * s) +static inline int has_bcr3_and_bcr4(SH7750State *s) { return s->cpu->env.features & 
SH_FEATURE_BCR3_AND_BCR4; } -/********************************************************************** - I/O ports -**********************************************************************/ -int sh7750_register_io_device(SH7750State * s, sh7750_io_device * device) +/* + * I/O ports + */ + +int sh7750_register_io_device(SH7750State *s, sh7750_io_device *device) { int i; for (i = 0; i < NB_DEVICES; i++) { - if (s->devices[i] == NULL) { - s->devices[i] = device; - return 0; - } + if (s->devices[i] == NULL) { + s->devices[i] = device; + return 0; + } } return -1; } static uint16_t portdir(uint32_t v) { -#define EVENPORTMASK(n) ((v & (1<<((n)<<1))) >> (n)) +#define EVENPORTMASK(n) ((v & (1 << ((n) << 1))) >> (n)) return - EVENPORTMASK(15) | EVENPORTMASK(14) | EVENPORTMASK(13) | - EVENPORTMASK(12) | EVENPORTMASK(11) | EVENPORTMASK(10) | - EVENPORTMASK(9) | EVENPORTMASK(8) | EVENPORTMASK(7) | - EVENPORTMASK(6) | EVENPORTMASK(5) | EVENPORTMASK(4) | - EVENPORTMASK(3) | EVENPORTMASK(2) | EVENPORTMASK(1) | - EVENPORTMASK(0); + EVENPORTMASK(15) | EVENPORTMASK(14) | EVENPORTMASK(13) | + EVENPORTMASK(12) | EVENPORTMASK(11) | EVENPORTMASK(10) | + EVENPORTMASK(9) | EVENPORTMASK(8) | EVENPORTMASK(7) | + EVENPORTMASK(6) | EVENPORTMASK(5) | EVENPORTMASK(4) | + EVENPORTMASK(3) | EVENPORTMASK(2) | EVENPORTMASK(1) | + EVENPORTMASK(0); } static uint16_t portpullup(uint32_t v) { -#define ODDPORTMASK(n) ((v & (1<<(((n)<<1)+1))) >> (n)) +#define ODDPORTMASK(n) ((v & (1 << (((n) << 1) + 1))) >> (n)) return - ODDPORTMASK(15) | ODDPORTMASK(14) | ODDPORTMASK(13) | - ODDPORTMASK(12) | ODDPORTMASK(11) | ODDPORTMASK(10) | - ODDPORTMASK(9) | ODDPORTMASK(8) | ODDPORTMASK(7) | ODDPORTMASK(6) | - ODDPORTMASK(5) | ODDPORTMASK(4) | ODDPORTMASK(3) | ODDPORTMASK(2) | - ODDPORTMASK(1) | ODDPORTMASK(0); + ODDPORTMASK(15) | ODDPORTMASK(14) | ODDPORTMASK(13) | + ODDPORTMASK(12) | ODDPORTMASK(11) | ODDPORTMASK(10) | + ODDPORTMASK(9) | ODDPORTMASK(8) | ODDPORTMASK(7) | ODDPORTMASK(6) | + ODDPORTMASK(5) | ODDPORTMASK(4) | ODDPORTMASK(3) | ODDPORTMASK(2) | + ODDPORTMASK(1) | ODDPORTMASK(0); } -static uint16_t porta_lines(SH7750State * s) +static uint16_t porta_lines(SH7750State *s) { - return (s->portdira & s->pdtra) | /* CPU */ - (s->periph_portdira & s->periph_pdtra) | /* Peripherals */ - (~(s->portdira | s->periph_portdira) & s->portpullupa); /* Pullups */ + return (s->portdira & s->pdtra) | /* CPU */ + (s->periph_portdira & s->periph_pdtra) | /* Peripherals */ + (~(s->portdira | s->periph_portdira) & s->portpullupa); /* Pullups */ } -static uint16_t portb_lines(SH7750State * s) +static uint16_t portb_lines(SH7750State *s) { - return (s->portdirb & s->pdtrb) | /* CPU */ - (s->periph_portdirb & s->periph_pdtrb) | /* Peripherals */ - (~(s->portdirb | s->periph_portdirb) & s->portpullupb); /* Pullups */ + return (s->portdirb & s->pdtrb) | /* CPU */ + (s->periph_portdirb & s->periph_pdtrb) | /* Peripherals */ + (~(s->portdirb | s->periph_portdirb) & s->portpullupb); /* Pullups */ } -static void gen_port_interrupts(SH7750State * s) +static void gen_port_interrupts(SH7750State *s) { /* XXXXX interrupts not generated */ } -static void porta_changed(SH7750State * s, uint16_t prev) +static void porta_changed(SH7750State *s, uint16_t prev) { uint16_t currenta, changes; int i, r = 0; -#if 0 - fprintf(stderr, "porta changed from 0x%04x to 0x%04x\n", - prev, porta_lines(s)); - fprintf(stderr, "pdtra=0x%04x, pctra=0x%08x\n", s->pdtra, s->pctra); -#endif currenta = porta_lines(s); - if (currenta == prev) - return; + if (currenta == prev) { + return; + } 
+ trace_sh7750_porta(prev, currenta, s->pdtra, s->pctra); changes = currenta ^ prev; for (i = 0; i < NB_DEVICES; i++) { - if (s->devices[i] && (s->devices[i]->portamask_trigger & changes)) { - r |= s->devices[i]->port_change_cb(currenta, portb_lines(s), - &s->periph_pdtra, - &s->periph_portdira, - &s->periph_pdtrb, - &s->periph_portdirb); - } + if (s->devices[i] && (s->devices[i]->portamask_trigger & changes)) { + r |= s->devices[i]->port_change_cb(currenta, portb_lines(s), + &s->periph_pdtra, + &s->periph_portdira, + &s->periph_pdtrb, + &s->periph_portdirb); + } } - if (r) - gen_port_interrupts(s); + if (r) { + gen_port_interrupts(s); + } } -static void portb_changed(SH7750State * s, uint16_t prev) +static void portb_changed(SH7750State *s, uint16_t prev) { uint16_t currentb, changes; int i, r = 0; currentb = portb_lines(s); - if (currentb == prev) - return; + if (currentb == prev) { + return; + } + trace_sh7750_portb(prev, currentb, s->pdtrb, s->pctrb); changes = currentb ^ prev; for (i = 0; i < NB_DEVICES; i++) { - if (s->devices[i] && (s->devices[i]->portbmask_trigger & changes)) { - r |= s->devices[i]->port_change_cb(portb_lines(s), currentb, - &s->periph_pdtra, - &s->periph_portdira, - &s->periph_pdtrb, - &s->periph_portdirb); - } + if (s->devices[i] && (s->devices[i]->portbmask_trigger & changes)) { + r |= s->devices[i]->port_change_cb(portb_lines(s), currentb, + &s->periph_pdtra, + &s->periph_portdira, + &s->periph_pdtrb, + &s->periph_portdirb); + } } - if (r) - gen_port_interrupts(s); + if (r) { + gen_port_interrupts(s); + } } -/********************************************************************** - Memory -**********************************************************************/ +/* + * Memory + */ static void error_access(const char *kind, hwaddr addr) { fprintf(stderr, "%s to %s (0x" TARGET_FMT_plx ") not supported\n", - kind, regname(addr), addr); + kind, regname(addr), addr); } static void ignore_access(const char *kind, hwaddr addr) { fprintf(stderr, "%s to %s (0x" TARGET_FMT_plx ") ignored\n", - kind, regname(addr), addr); + kind, regname(addr), addr); } static uint32_t sh7750_mem_readb(void *opaque, hwaddr addr) { switch (addr) { default: - error_access("byte read", addr); + error_access("byte read", addr); abort(); } } @@ -225,30 +232,31 @@ static uint32_t sh7750_mem_readw(void *opaque, hwaddr addr) switch (addr) { case SH7750_BCR2_A7: - return s->bcr2; + return s->bcr2; case SH7750_BCR3_A7: - if(!has_bcr3_and_bcr4(s)) - error_access("word read", addr); - return s->bcr3; + if (!has_bcr3_and_bcr4(s)) { + error_access("word read", addr); + } + return s->bcr3; case SH7750_FRQCR_A7: - return 0; + return 0; case SH7750_PCR_A7: - return s->pcr; + return s->pcr; case SH7750_RFCR_A7: - fprintf(stderr, - "Read access to refresh count register, incrementing\n"); - return s->rfcr++; + fprintf(stderr, + "Read access to refresh count register, incrementing\n"); + return s->rfcr++; case SH7750_PDTRA_A7: - return porta_lines(s); + return porta_lines(s); case SH7750_PDTRB_A7: - return portb_lines(s); + return portb_lines(s); case SH7750_RTCOR_A7: case SH7750_RTCNT_A7: case SH7750_RTCSR_A7: - ignore_access("word read", addr); - return 0; + ignore_access("word read", addr); + return 0; default: - error_access("word read", addr); + error_access("word read", addr); abort(); } } @@ -260,11 +268,12 @@ static uint32_t sh7750_mem_readl(void *opaque, hwaddr addr) switch (addr) { case SH7750_BCR1_A7: - return s->bcr1; + return s->bcr1; case SH7750_BCR4_A7: - if(!has_bcr3_and_bcr4(s)) - 
error_access("long read", addr); - return s->bcr4; + if (!has_bcr3_and_bcr4(s)) { + error_access("long read", addr); + } + return s->bcr4; case SH7750_WCR1_A7: case SH7750_WCR2_A7: case SH7750_WCR3_A7: @@ -288,31 +297,31 @@ static uint32_t sh7750_mem_readl(void *opaque, hwaddr addr) case SH7750_INTEVT_A7: return s->cpu->env.intevt; case SH7750_CCR_A7: - return s->ccr; - case 0x1f000030: /* Processor version */ + return s->ccr; + case 0x1f000030: /* Processor version */ scc = SUPERH_CPU_GET_CLASS(s->cpu); return scc->pvr; - case 0x1f000040: /* Cache version */ + case 0x1f000040: /* Cache version */ scc = SUPERH_CPU_GET_CLASS(s->cpu); return scc->cvr; - case 0x1f000044: /* Processor revision */ + case 0x1f000044: /* Processor revision */ scc = SUPERH_CPU_GET_CLASS(s->cpu); return scc->prr; default: - error_access("long read", addr); + error_access("long read", addr); abort(); } } #define is_in_sdrmx(a, x) (a >= SH7750_SDMR ## x ## _A7 \ - && a <= (SH7750_SDMR ## x ## _A7 + SH7750_SDMR ## x ## _REGNB)) + && a <= (SH7750_SDMR ## x ## _A7 + SH7750_SDMR ## x ## _REGNB)) static void sh7750_mem_writeb(void *opaque, hwaddr addr, - uint32_t mem_value) + uint32_t mem_value) { if (is_in_sdrmx(addr, 2) || is_in_sdrmx(addr, 3)) { - ignore_access("byte write", addr); - return; + ignore_access("byte write", addr); + return; } error_access("byte write", addr); @@ -320,94 +329,96 @@ static void sh7750_mem_writeb(void *opaque, hwaddr addr, } static void sh7750_mem_writew(void *opaque, hwaddr addr, - uint32_t mem_value) + uint32_t mem_value) { SH7750State *s = opaque; uint16_t temp; switch (addr) { - /* SDRAM controller */ + /* SDRAM controller */ case SH7750_BCR2_A7: s->bcr2 = mem_value; return; case SH7750_BCR3_A7: - if(!has_bcr3_and_bcr4(s)) - error_access("word write", addr); - s->bcr3 = mem_value; - return; + if (!has_bcr3_and_bcr4(s)) { + error_access("word write", addr); + } + s->bcr3 = mem_value; + return; case SH7750_PCR_A7: - s->pcr = mem_value; - return; + s->pcr = mem_value; + return; case SH7750_RTCNT_A7: case SH7750_RTCOR_A7: case SH7750_RTCSR_A7: - ignore_access("word write", addr); - return; - /* IO ports */ + ignore_access("word write", addr); + return; + /* IO ports */ case SH7750_PDTRA_A7: - temp = porta_lines(s); - s->pdtra = mem_value; - porta_changed(s, temp); - return; + temp = porta_lines(s); + s->pdtra = mem_value; + porta_changed(s, temp); + return; case SH7750_PDTRB_A7: - temp = portb_lines(s); - s->pdtrb = mem_value; - portb_changed(s, temp); - return; + temp = portb_lines(s); + s->pdtrb = mem_value; + portb_changed(s, temp); + return; case SH7750_RFCR_A7: - fprintf(stderr, "Write access to refresh count register\n"); - s->rfcr = mem_value; - return; + fprintf(stderr, "Write access to refresh count register\n"); + s->rfcr = mem_value; + return; case SH7750_GPIOIC_A7: - s->gpioic = mem_value; - if (mem_value != 0) { - fprintf(stderr, "I/O interrupts not implemented\n"); + s->gpioic = mem_value; + if (mem_value != 0) { + fprintf(stderr, "I/O interrupts not implemented\n"); abort(); - } - return; + } + return; default: - error_access("word write", addr); + error_access("word write", addr); abort(); } } static void sh7750_mem_writel(void *opaque, hwaddr addr, - uint32_t mem_value) + uint32_t mem_value) { SH7750State *s = opaque; uint16_t temp; switch (addr) { - /* SDRAM controller */ + /* SDRAM controller */ case SH7750_BCR1_A7: s->bcr1 = mem_value; return; case SH7750_BCR4_A7: - if(!has_bcr3_and_bcr4(s)) - error_access("long write", addr); - s->bcr4 = mem_value; - return; + if 
(!has_bcr3_and_bcr4(s)) { + error_access("long write", addr); + } + s->bcr4 = mem_value; + return; case SH7750_WCR1_A7: case SH7750_WCR2_A7: case SH7750_WCR3_A7: case SH7750_MCR_A7: - ignore_access("long write", addr); - return; - /* IO ports */ + ignore_access("long write", addr); + return; + /* IO ports */ case SH7750_PCTRA_A7: - temp = porta_lines(s); - s->pctra = mem_value; - s->portdira = portdir(mem_value); - s->portpullupa = portpullup(mem_value); - porta_changed(s, temp); - return; + temp = porta_lines(s); + s->pctra = mem_value; + s->portdira = portdir(mem_value); + s->portpullupa = portpullup(mem_value); + porta_changed(s, temp); + return; case SH7750_PCTRB_A7: - temp = portb_lines(s); - s->pctrb = mem_value; - s->portdirb = portdir(mem_value); - s->portpullupb = portpullup(mem_value); - portb_changed(s, temp); - return; + temp = portb_lines(s); + s->pctrb = mem_value; + s->portdirb = portdir(mem_value); + s->portpullupb = portpullup(mem_value); + portb_changed(s, temp); + return; case SH7750_MMUCR_A7: if (mem_value & MMUCR_TI) { cpu_sh4_invalidate_tlb(&s->cpu->env); @@ -443,10 +454,10 @@ static void sh7750_mem_writel(void *opaque, hwaddr addr, s->cpu->env.intevt = mem_value & 0x000007ff; return; case SH7750_CCR_A7: - s->ccr = mem_value; - return; + s->ccr = mem_value; + return; default: - error_access("long write", addr); + error_access("long write", addr); abort(); } } @@ -491,161 +502,161 @@ static const MemoryRegionOps sh7750_mem_ops = { .endianness = DEVICE_NATIVE_ENDIAN, }; -/* sh775x interrupt controller tables for sh_intc.c +/* + * sh775x interrupt controller tables for sh_intc.c * stolen from linux/arch/sh/kernel/cpu/sh4/setup-sh7750.c */ enum { - UNUSED = 0, + UNUSED = 0, - /* interrupt sources */ - IRL_0, IRL_1, IRL_2, IRL_3, IRL_4, IRL_5, IRL_6, IRL_7, - IRL_8, IRL_9, IRL_A, IRL_B, IRL_C, IRL_D, IRL_E, - IRL0, IRL1, IRL2, IRL3, - HUDI, GPIOI, - DMAC_DMTE0, DMAC_DMTE1, DMAC_DMTE2, DMAC_DMTE3, - DMAC_DMTE4, DMAC_DMTE5, DMAC_DMTE6, DMAC_DMTE7, - DMAC_DMAE, - PCIC0_PCISERR, PCIC1_PCIERR, PCIC1_PCIPWDWN, PCIC1_PCIPWON, - PCIC1_PCIDMA0, PCIC1_PCIDMA1, PCIC1_PCIDMA2, PCIC1_PCIDMA3, - TMU3, TMU4, TMU0, TMU1, TMU2_TUNI, TMU2_TICPI, - RTC_ATI, RTC_PRI, RTC_CUI, - SCI1_ERI, SCI1_RXI, SCI1_TXI, SCI1_TEI, - SCIF_ERI, SCIF_RXI, SCIF_BRI, SCIF_TXI, - WDT, - REF_RCMI, REF_ROVI, + /* interrupt sources */ + IRL_0, IRL_1, IRL_2, IRL_3, IRL_4, IRL_5, IRL_6, IRL_7, + IRL_8, IRL_9, IRL_A, IRL_B, IRL_C, IRL_D, IRL_E, + IRL0, IRL1, IRL2, IRL3, + HUDI, GPIOI, + DMAC_DMTE0, DMAC_DMTE1, DMAC_DMTE2, DMAC_DMTE3, + DMAC_DMTE4, DMAC_DMTE5, DMAC_DMTE6, DMAC_DMTE7, + DMAC_DMAE, + PCIC0_PCISERR, PCIC1_PCIERR, PCIC1_PCIPWDWN, PCIC1_PCIPWON, + PCIC1_PCIDMA0, PCIC1_PCIDMA1, PCIC1_PCIDMA2, PCIC1_PCIDMA3, + TMU3, TMU4, TMU0, TMU1, TMU2_TUNI, TMU2_TICPI, + RTC_ATI, RTC_PRI, RTC_CUI, + SCI1_ERI, SCI1_RXI, SCI1_TXI, SCI1_TEI, + SCIF_ERI, SCIF_RXI, SCIF_BRI, SCIF_TXI, + WDT, + REF_RCMI, REF_ROVI, - /* interrupt groups */ - DMAC, PCIC1, TMU2, RTC, SCI1, SCIF, REF, - /* irl bundle */ - IRL, + /* interrupt groups */ + DMAC, PCIC1, TMU2, RTC, SCI1, SCIF, REF, + /* irl bundle */ + IRL, - NR_SOURCES, + NR_SOURCES, }; static struct intc_vect vectors[] = { - INTC_VECT(HUDI, 0x600), INTC_VECT(GPIOI, 0x620), - INTC_VECT(TMU0, 0x400), INTC_VECT(TMU1, 0x420), - INTC_VECT(TMU2_TUNI, 0x440), INTC_VECT(TMU2_TICPI, 0x460), - INTC_VECT(RTC_ATI, 0x480), INTC_VECT(RTC_PRI, 0x4a0), - INTC_VECT(RTC_CUI, 0x4c0), - INTC_VECT(SCI1_ERI, 0x4e0), INTC_VECT(SCI1_RXI, 0x500), - INTC_VECT(SCI1_TXI, 0x520), INTC_VECT(SCI1_TEI, 0x540), - 
INTC_VECT(SCIF_ERI, 0x700), INTC_VECT(SCIF_RXI, 0x720), - INTC_VECT(SCIF_BRI, 0x740), INTC_VECT(SCIF_TXI, 0x760), - INTC_VECT(WDT, 0x560), - INTC_VECT(REF_RCMI, 0x580), INTC_VECT(REF_ROVI, 0x5a0), + INTC_VECT(HUDI, 0x600), INTC_VECT(GPIOI, 0x620), + INTC_VECT(TMU0, 0x400), INTC_VECT(TMU1, 0x420), + INTC_VECT(TMU2_TUNI, 0x440), INTC_VECT(TMU2_TICPI, 0x460), + INTC_VECT(RTC_ATI, 0x480), INTC_VECT(RTC_PRI, 0x4a0), + INTC_VECT(RTC_CUI, 0x4c0), + INTC_VECT(SCI1_ERI, 0x4e0), INTC_VECT(SCI1_RXI, 0x500), + INTC_VECT(SCI1_TXI, 0x520), INTC_VECT(SCI1_TEI, 0x540), + INTC_VECT(SCIF_ERI, 0x700), INTC_VECT(SCIF_RXI, 0x720), + INTC_VECT(SCIF_BRI, 0x740), INTC_VECT(SCIF_TXI, 0x760), + INTC_VECT(WDT, 0x560), + INTC_VECT(REF_RCMI, 0x580), INTC_VECT(REF_ROVI, 0x5a0), }; static struct intc_group groups[] = { - INTC_GROUP(TMU2, TMU2_TUNI, TMU2_TICPI), - INTC_GROUP(RTC, RTC_ATI, RTC_PRI, RTC_CUI), - INTC_GROUP(SCI1, SCI1_ERI, SCI1_RXI, SCI1_TXI, SCI1_TEI), - INTC_GROUP(SCIF, SCIF_ERI, SCIF_RXI, SCIF_BRI, SCIF_TXI), - INTC_GROUP(REF, REF_RCMI, REF_ROVI), + INTC_GROUP(TMU2, TMU2_TUNI, TMU2_TICPI), + INTC_GROUP(RTC, RTC_ATI, RTC_PRI, RTC_CUI), + INTC_GROUP(SCI1, SCI1_ERI, SCI1_RXI, SCI1_TXI, SCI1_TEI), + INTC_GROUP(SCIF, SCIF_ERI, SCIF_RXI, SCIF_BRI, SCIF_TXI), + INTC_GROUP(REF, REF_RCMI, REF_ROVI), }; static struct intc_prio_reg prio_registers[] = { - { 0xffd00004, 0, 16, 4, /* IPRA */ { TMU0, TMU1, TMU2, RTC } }, - { 0xffd00008, 0, 16, 4, /* IPRB */ { WDT, REF, SCI1, 0 } }, - { 0xffd0000c, 0, 16, 4, /* IPRC */ { GPIOI, DMAC, SCIF, HUDI } }, - { 0xffd00010, 0, 16, 4, /* IPRD */ { IRL0, IRL1, IRL2, IRL3 } }, - { 0xfe080000, 0, 32, 4, /* INTPRI00 */ { 0, 0, 0, 0, - TMU4, TMU3, - PCIC1, PCIC0_PCISERR } }, + { 0xffd00004, 0, 16, 4, /* IPRA */ { TMU0, TMU1, TMU2, RTC } }, + { 0xffd00008, 0, 16, 4, /* IPRB */ { WDT, REF, SCI1, 0 } }, + { 0xffd0000c, 0, 16, 4, /* IPRC */ { GPIOI, DMAC, SCIF, HUDI } }, + { 0xffd00010, 0, 16, 4, /* IPRD */ { IRL0, IRL1, IRL2, IRL3 } }, + { 0xfe080000, 0, 32, 4, /* INTPRI00 */ { 0, 0, 0, 0, TMU4, TMU3, + PCIC1, PCIC0_PCISERR } }, }; /* SH7750, SH7750S, SH7751 and SH7091 all have 4-channel DMA controllers */ static struct intc_vect vectors_dma4[] = { - INTC_VECT(DMAC_DMTE0, 0x640), INTC_VECT(DMAC_DMTE1, 0x660), - INTC_VECT(DMAC_DMTE2, 0x680), INTC_VECT(DMAC_DMTE3, 0x6a0), - INTC_VECT(DMAC_DMAE, 0x6c0), + INTC_VECT(DMAC_DMTE0, 0x640), INTC_VECT(DMAC_DMTE1, 0x660), + INTC_VECT(DMAC_DMTE2, 0x680), INTC_VECT(DMAC_DMTE3, 0x6a0), + INTC_VECT(DMAC_DMAE, 0x6c0), }; static struct intc_group groups_dma4[] = { - INTC_GROUP(DMAC, DMAC_DMTE0, DMAC_DMTE1, DMAC_DMTE2, - DMAC_DMTE3, DMAC_DMAE), + INTC_GROUP(DMAC, DMAC_DMTE0, DMAC_DMTE1, DMAC_DMTE2, + DMAC_DMTE3, DMAC_DMAE), }; /* SH7750R and SH7751R both have 8-channel DMA controllers */ static struct intc_vect vectors_dma8[] = { - INTC_VECT(DMAC_DMTE0, 0x640), INTC_VECT(DMAC_DMTE1, 0x660), - INTC_VECT(DMAC_DMTE2, 0x680), INTC_VECT(DMAC_DMTE3, 0x6a0), - INTC_VECT(DMAC_DMTE4, 0x780), INTC_VECT(DMAC_DMTE5, 0x7a0), - INTC_VECT(DMAC_DMTE6, 0x7c0), INTC_VECT(DMAC_DMTE7, 0x7e0), - INTC_VECT(DMAC_DMAE, 0x6c0), + INTC_VECT(DMAC_DMTE0, 0x640), INTC_VECT(DMAC_DMTE1, 0x660), + INTC_VECT(DMAC_DMTE2, 0x680), INTC_VECT(DMAC_DMTE3, 0x6a0), + INTC_VECT(DMAC_DMTE4, 0x780), INTC_VECT(DMAC_DMTE5, 0x7a0), + INTC_VECT(DMAC_DMTE6, 0x7c0), INTC_VECT(DMAC_DMTE7, 0x7e0), + INTC_VECT(DMAC_DMAE, 0x6c0), }; static struct intc_group groups_dma8[] = { - INTC_GROUP(DMAC, DMAC_DMTE0, DMAC_DMTE1, DMAC_DMTE2, - DMAC_DMTE3, DMAC_DMTE4, DMAC_DMTE5, - DMAC_DMTE6, DMAC_DMTE7, DMAC_DMAE), + 
INTC_GROUP(DMAC, DMAC_DMTE0, DMAC_DMTE1, DMAC_DMTE2, + DMAC_DMTE3, DMAC_DMTE4, DMAC_DMTE5, + DMAC_DMTE6, DMAC_DMTE7, DMAC_DMAE), }; /* SH7750R, SH7751 and SH7751R all have two extra timer channels */ static struct intc_vect vectors_tmu34[] = { - INTC_VECT(TMU3, 0xb00), INTC_VECT(TMU4, 0xb80), + INTC_VECT(TMU3, 0xb00), INTC_VECT(TMU4, 0xb80), }; static struct intc_mask_reg mask_registers[] = { - { 0xfe080040, 0xfe080060, 32, /* INTMSK00 / INTMSKCLR00 */ - { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, TMU4, TMU3, - PCIC1_PCIERR, PCIC1_PCIPWDWN, PCIC1_PCIPWON, - PCIC1_PCIDMA0, PCIC1_PCIDMA1, PCIC1_PCIDMA2, - PCIC1_PCIDMA3, PCIC0_PCISERR } }, + { 0xfe080040, 0xfe080060, 32, /* INTMSK00 / INTMSKCLR00 */ + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, TMU4, TMU3, + PCIC1_PCIERR, PCIC1_PCIPWDWN, PCIC1_PCIPWON, + PCIC1_PCIDMA0, PCIC1_PCIDMA1, PCIC1_PCIDMA2, + PCIC1_PCIDMA3, PCIC0_PCISERR } }, }; /* SH7750S, SH7750R, SH7751 and SH7751R all have IRLM priority registers */ static struct intc_vect vectors_irlm[] = { - INTC_VECT(IRL0, 0x240), INTC_VECT(IRL1, 0x2a0), - INTC_VECT(IRL2, 0x300), INTC_VECT(IRL3, 0x360), + INTC_VECT(IRL0, 0x240), INTC_VECT(IRL1, 0x2a0), + INTC_VECT(IRL2, 0x300), INTC_VECT(IRL3, 0x360), }; /* SH7751 and SH7751R both have PCI */ static struct intc_vect vectors_pci[] = { - INTC_VECT(PCIC0_PCISERR, 0xa00), INTC_VECT(PCIC1_PCIERR, 0xae0), - INTC_VECT(PCIC1_PCIPWDWN, 0xac0), INTC_VECT(PCIC1_PCIPWON, 0xaa0), - INTC_VECT(PCIC1_PCIDMA0, 0xa80), INTC_VECT(PCIC1_PCIDMA1, 0xa60), - INTC_VECT(PCIC1_PCIDMA2, 0xa40), INTC_VECT(PCIC1_PCIDMA3, 0xa20), + INTC_VECT(PCIC0_PCISERR, 0xa00), INTC_VECT(PCIC1_PCIERR, 0xae0), + INTC_VECT(PCIC1_PCIPWDWN, 0xac0), INTC_VECT(PCIC1_PCIPWON, 0xaa0), + INTC_VECT(PCIC1_PCIDMA0, 0xa80), INTC_VECT(PCIC1_PCIDMA1, 0xa60), + INTC_VECT(PCIC1_PCIDMA2, 0xa40), INTC_VECT(PCIC1_PCIDMA3, 0xa20), }; static struct intc_group groups_pci[] = { - INTC_GROUP(PCIC1, PCIC1_PCIERR, PCIC1_PCIPWDWN, PCIC1_PCIPWON, - PCIC1_PCIDMA0, PCIC1_PCIDMA1, PCIC1_PCIDMA2, PCIC1_PCIDMA3), + INTC_GROUP(PCIC1, PCIC1_PCIERR, PCIC1_PCIPWDWN, PCIC1_PCIPWON, + PCIC1_PCIDMA0, PCIC1_PCIDMA1, PCIC1_PCIDMA2, PCIC1_PCIDMA3), }; static struct intc_vect vectors_irl[] = { - INTC_VECT(IRL_0, 0x200), - INTC_VECT(IRL_1, 0x220), - INTC_VECT(IRL_2, 0x240), - INTC_VECT(IRL_3, 0x260), - INTC_VECT(IRL_4, 0x280), - INTC_VECT(IRL_5, 0x2a0), - INTC_VECT(IRL_6, 0x2c0), - INTC_VECT(IRL_7, 0x2e0), - INTC_VECT(IRL_8, 0x300), - INTC_VECT(IRL_9, 0x320), - INTC_VECT(IRL_A, 0x340), - INTC_VECT(IRL_B, 0x360), - INTC_VECT(IRL_C, 0x380), - INTC_VECT(IRL_D, 0x3a0), - INTC_VECT(IRL_E, 0x3c0), + INTC_VECT(IRL_0, 0x200), + INTC_VECT(IRL_1, 0x220), + INTC_VECT(IRL_2, 0x240), + INTC_VECT(IRL_3, 0x260), + INTC_VECT(IRL_4, 0x280), + INTC_VECT(IRL_5, 0x2a0), + INTC_VECT(IRL_6, 0x2c0), + INTC_VECT(IRL_7, 0x2e0), + INTC_VECT(IRL_8, 0x300), + INTC_VECT(IRL_9, 0x320), + INTC_VECT(IRL_A, 0x340), + INTC_VECT(IRL_B, 0x360), + INTC_VECT(IRL_C, 0x380), + INTC_VECT(IRL_D, 0x3a0), + INTC_VECT(IRL_E, 0x3c0), }; static struct intc_group groups_irl[] = { - INTC_GROUP(IRL, IRL_0, IRL_1, IRL_2, IRL_3, IRL_4, IRL_5, IRL_6, - IRL_7, IRL_8, IRL_9, IRL_A, IRL_B, IRL_C, IRL_D, IRL_E), + INTC_GROUP(IRL, IRL_0, IRL_1, IRL_2, IRL_3, IRL_4, IRL_5, IRL_6, + IRL_7, IRL_8, IRL_9, IRL_A, IRL_B, IRL_C, IRL_D, IRL_E), }; -/********************************************************************** - Memory mapped cache and TLB -**********************************************************************/ +/* + * Memory mapped 
cache and TLB + */ #define MM_REGION_MASK 0x07000000 #define MM_ICACHE_ADDR (0) @@ -679,7 +690,7 @@ static uint64_t sh7750_mmct_read(void *opaque, hwaddr addr, case MM_ICACHE_ADDR: case MM_ICACHE_DATA: /* do nothing */ - break; + break; case MM_ITLB_ADDR: ret = cpu_sh4_read_mmaped_itlb_addr(&s->cpu->env, addr); break; @@ -689,7 +700,7 @@ static uint64_t sh7750_mmct_read(void *opaque, hwaddr addr, case MM_OCACHE_ADDR: case MM_OCACHE_DATA: /* do nothing */ - break; + break; case MM_UTLB_ADDR: ret = cpu_sh4_read_mmaped_utlb_addr(&s->cpu->env, addr); break; @@ -722,27 +733,27 @@ static void sh7750_mmct_write(void *opaque, hwaddr addr, case MM_ICACHE_ADDR: case MM_ICACHE_DATA: /* do nothing */ - break; + break; case MM_ITLB_ADDR: cpu_sh4_write_mmaped_itlb_addr(&s->cpu->env, addr, mem_value); break; case MM_ITLB_DATA: cpu_sh4_write_mmaped_itlb_data(&s->cpu->env, addr, mem_value); abort(); - break; + break; case MM_OCACHE_ADDR: case MM_OCACHE_DATA: /* do nothing */ - break; + break; case MM_UTLB_ADDR: cpu_sh4_write_mmaped_utlb_addr(&s->cpu->env, addr, mem_value); - break; + break; case MM_UTLB_DATA: cpu_sh4_write_mmaped_utlb_data(&s->cpu->env, addr, mem_value); - break; + break; default: abort(); - break; + break; } } @@ -755,10 +766,13 @@ static const MemoryRegionOps sh7750_mmct_ops = { SH7750State *sh7750_init(SuperHCPU *cpu, MemoryRegion *sysmem) { SH7750State *s; + DeviceState *dev; + SysBusDevice *sb; + MemoryRegion *mr, *alias; s = g_malloc0(sizeof(SH7750State)); s->cpu = cpu; - s->periph_freq = 60000000; /* 60MHz */ + s->periph_freq = 60000000; /* 60MHz */ memory_region_init_io(&s->iomem, NULL, &sh7750_mem_ops, s, "memory", 0x1fc01000); @@ -791,81 +805,100 @@ SH7750State *sh7750_init(SuperHCPU *cpu, MemoryRegion *sysmem) memory_region_add_subregion(sysmem, 0xf0000000, &s->mmct_iomem); sh_intc_init(sysmem, &s->intc, NR_SOURCES, - _INTC_ARRAY(mask_registers), - _INTC_ARRAY(prio_registers)); + _INTC_ARRAY(mask_registers), + _INTC_ARRAY(prio_registers)); sh_intc_register_sources(&s->intc, - _INTC_ARRAY(vectors), - _INTC_ARRAY(groups)); + _INTC_ARRAY(vectors), + _INTC_ARRAY(groups)); cpu->env.intc_handle = &s->intc; - sh_serial_init(sysmem, 0x1fe00000, - 0, s->periph_freq, serial_hd(0), - s->intc.irqs[SCI1_ERI], - s->intc.irqs[SCI1_RXI], - s->intc.irqs[SCI1_TXI], - s->intc.irqs[SCI1_TEI], - NULL); - sh_serial_init(sysmem, 0x1fe80000, - SH_SERIAL_FEAT_SCIF, - s->periph_freq, serial_hd(1), - s->intc.irqs[SCIF_ERI], - s->intc.irqs[SCIF_RXI], - s->intc.irqs[SCIF_TXI], - NULL, - s->intc.irqs[SCIF_BRI]); + /* SCI */ + dev = qdev_new(TYPE_SH_SERIAL); + dev->id = g_strdup("sci"); + qdev_prop_set_chr(dev, "chardev", serial_hd(0)); + sb = SYS_BUS_DEVICE(dev); + sysbus_realize_and_unref(sb, &error_fatal); + sysbus_mmio_map(sb, 0, 0xffe00000); + alias = g_malloc(sizeof(*alias)); + mr = sysbus_mmio_get_region(sb, 0); + memory_region_init_alias(alias, OBJECT(dev), "sci-a7", mr, + 0, memory_region_size(mr)); + memory_region_add_subregion(sysmem, A7ADDR(0xffe00000), alias); + qdev_connect_gpio_out_named(dev, "eri", 0, s->intc.irqs[SCI1_ERI]); + qdev_connect_gpio_out_named(dev, "rxi", 0, s->intc.irqs[SCI1_RXI]); + qdev_connect_gpio_out_named(dev, "txi", 0, s->intc.irqs[SCI1_TXI]); + qdev_connect_gpio_out_named(dev, "tei", 0, s->intc.irqs[SCI1_TEI]); + + /* SCIF */ + dev = qdev_new(TYPE_SH_SERIAL); + dev->id = g_strdup("scif"); + qdev_prop_set_chr(dev, "chardev", serial_hd(1)); + qdev_prop_set_uint8(dev, "features", SH_SERIAL_FEAT_SCIF); + sb = SYS_BUS_DEVICE(dev); + sysbus_realize_and_unref(sb, &error_fatal); + 
sysbus_mmio_map(sb, 0, 0xffe80000); + alias = g_malloc(sizeof(*alias)); + mr = sysbus_mmio_get_region(sb, 0); + memory_region_init_alias(alias, OBJECT(dev), "scif-a7", mr, + 0, memory_region_size(mr)); + memory_region_add_subregion(sysmem, A7ADDR(0xffe80000), alias); + qdev_connect_gpio_out_named(dev, "eri", 0, s->intc.irqs[SCIF_ERI]); + qdev_connect_gpio_out_named(dev, "rxi", 0, s->intc.irqs[SCIF_RXI]); + qdev_connect_gpio_out_named(dev, "txi", 0, s->intc.irqs[SCIF_TXI]); + qdev_connect_gpio_out_named(dev, "bri", 0, s->intc.irqs[SCIF_BRI]); tmu012_init(sysmem, 0x1fd80000, - TMU012_FEAT_TOCR | TMU012_FEAT_3CHAN | TMU012_FEAT_EXTCLK, - s->periph_freq, - s->intc.irqs[TMU0], - s->intc.irqs[TMU1], - s->intc.irqs[TMU2_TUNI], - s->intc.irqs[TMU2_TICPI]); + TMU012_FEAT_TOCR | TMU012_FEAT_3CHAN | TMU012_FEAT_EXTCLK, + s->periph_freq, + s->intc.irqs[TMU0], + s->intc.irqs[TMU1], + s->intc.irqs[TMU2_TUNI], + s->intc.irqs[TMU2_TICPI]); if (cpu->env.id & (SH_CPU_SH7750 | SH_CPU_SH7750S | SH_CPU_SH7751)) { sh_intc_register_sources(&s->intc, - _INTC_ARRAY(vectors_dma4), - _INTC_ARRAY(groups_dma4)); + _INTC_ARRAY(vectors_dma4), + _INTC_ARRAY(groups_dma4)); } if (cpu->env.id & (SH_CPU_SH7750R | SH_CPU_SH7751R)) { sh_intc_register_sources(&s->intc, - _INTC_ARRAY(vectors_dma8), - _INTC_ARRAY(groups_dma8)); + _INTC_ARRAY(vectors_dma8), + _INTC_ARRAY(groups_dma8)); } if (cpu->env.id & (SH_CPU_SH7750R | SH_CPU_SH7751 | SH_CPU_SH7751R)) { sh_intc_register_sources(&s->intc, - _INTC_ARRAY(vectors_tmu34), - NULL, 0); + _INTC_ARRAY(vectors_tmu34), + NULL, 0); tmu012_init(sysmem, 0x1e100000, 0, s->periph_freq, - s->intc.irqs[TMU3], - s->intc.irqs[TMU4], - NULL, NULL); + s->intc.irqs[TMU3], + s->intc.irqs[TMU4], + NULL, NULL); } if (cpu->env.id & (SH_CPU_SH7751_ALL)) { sh_intc_register_sources(&s->intc, - _INTC_ARRAY(vectors_pci), - _INTC_ARRAY(groups_pci)); + _INTC_ARRAY(vectors_pci), + _INTC_ARRAY(groups_pci)); } if (cpu->env.id & (SH_CPU_SH7750S | SH_CPU_SH7750R | SH_CPU_SH7751_ALL)) { sh_intc_register_sources(&s->intc, - _INTC_ARRAY(vectors_irlm), - NULL, 0); + _INTC_ARRAY(vectors_irlm), + NULL, 0); } sh_intc_register_sources(&s->intc, - _INTC_ARRAY(vectors_irl), - _INTC_ARRAY(groups_irl)); + _INTC_ARRAY(vectors_irl), + _INTC_ARRAY(groups_irl)); return s; } qemu_irq sh7750_irl(SH7750State *s) { - sh_intc_toggle_source(sh_intc_source(&s->intc, IRL), 1, 0); /* enable */ - return qemu_allocate_irq(sh_intc_set_irl, sh_intc_source(&s->intc, IRL), 0); + sh_intc_toggle_source(&s->intc.sources[IRL], 1, 0); /* enable */ + return qemu_allocate_irq(sh_intc_set_irl, &s->intc.sources[IRL], 0); } diff --git a/hw/sh4/sh7750_regnames.c b/hw/sh4/sh7750_regnames.c index 0630fe3cf4..e531d46a8e 100644 --- a/hw/sh4/sh7750_regnames.c +++ b/hw/sh4/sh7750_regnames.c @@ -12,85 +12,87 @@ typedef struct { static regname_t regnames[] = { REGNAME(SH7750_PTEH_A7) - REGNAME(SH7750_PTEL_A7) - REGNAME(SH7750_PTEA_A7) - REGNAME(SH7750_TTB_A7) - REGNAME(SH7750_TEA_A7) - REGNAME(SH7750_MMUCR_A7) - REGNAME(SH7750_CCR_A7) - REGNAME(SH7750_QACR0_A7) - REGNAME(SH7750_QACR1_A7) - REGNAME(SH7750_TRA_A7) - REGNAME(SH7750_EXPEVT_A7) - REGNAME(SH7750_INTEVT_A7) - REGNAME(SH7750_STBCR_A7) - REGNAME(SH7750_STBCR2_A7) - REGNAME(SH7750_FRQCR_A7) - REGNAME(SH7750_WTCNT_A7) - REGNAME(SH7750_WTCSR_A7) - REGNAME(SH7750_R64CNT_A7) - REGNAME(SH7750_RSECCNT_A7) - REGNAME(SH7750_RMINCNT_A7) - REGNAME(SH7750_RHRCNT_A7) - REGNAME(SH7750_RWKCNT_A7) - REGNAME(SH7750_RDAYCNT_A7) - REGNAME(SH7750_RMONCNT_A7) - REGNAME(SH7750_RYRCNT_A7) - REGNAME(SH7750_RSECAR_A7) - 
REGNAME(SH7750_RMINAR_A7) - REGNAME(SH7750_RHRAR_A7) - REGNAME(SH7750_RWKAR_A7) - REGNAME(SH7750_RDAYAR_A7) - REGNAME(SH7750_RMONAR_A7) - REGNAME(SH7750_RCR1_A7) - REGNAME(SH7750_RCR2_A7) - REGNAME(SH7750_BCR1_A7) - REGNAME(SH7750_BCR2_A7) - REGNAME(SH7750_WCR1_A7) - REGNAME(SH7750_WCR2_A7) - REGNAME(SH7750_WCR3_A7) - REGNAME(SH7750_MCR_A7) - REGNAME(SH7750_PCR_A7) - REGNAME(SH7750_RTCSR_A7) - REGNAME(SH7750_RTCNT_A7) - REGNAME(SH7750_RTCOR_A7) - REGNAME(SH7750_RFCR_A7) - REGNAME(SH7750_SAR0_A7) - REGNAME(SH7750_SAR1_A7) - REGNAME(SH7750_SAR2_A7) - REGNAME(SH7750_SAR3_A7) - REGNAME(SH7750_DAR0_A7) - REGNAME(SH7750_DAR1_A7) - REGNAME(SH7750_DAR2_A7) - REGNAME(SH7750_DAR3_A7) - REGNAME(SH7750_DMATCR0_A7) - REGNAME(SH7750_DMATCR1_A7) - REGNAME(SH7750_DMATCR2_A7) - REGNAME(SH7750_DMATCR3_A7) - REGNAME(SH7750_CHCR0_A7) - REGNAME(SH7750_CHCR1_A7) - REGNAME(SH7750_CHCR2_A7) - REGNAME(SH7750_CHCR3_A7) - REGNAME(SH7750_DMAOR_A7) - REGNAME(SH7750_PCTRA_A7) - REGNAME(SH7750_PDTRA_A7) - REGNAME(SH7750_PCTRB_A7) - REGNAME(SH7750_PDTRB_A7) - REGNAME(SH7750_GPIOIC_A7) - REGNAME(SH7750_ICR_A7) - REGNAME(SH7750_BCR3_A7) - REGNAME(SH7750_BCR4_A7) - REGNAME(SH7750_SDMR2_A7) - REGNAME(SH7750_SDMR3_A7) {(uint32_t) - 1, NULL} + REGNAME(SH7750_PTEL_A7) + REGNAME(SH7750_PTEA_A7) + REGNAME(SH7750_TTB_A7) + REGNAME(SH7750_TEA_A7) + REGNAME(SH7750_MMUCR_A7) + REGNAME(SH7750_CCR_A7) + REGNAME(SH7750_QACR0_A7) + REGNAME(SH7750_QACR1_A7) + REGNAME(SH7750_TRA_A7) + REGNAME(SH7750_EXPEVT_A7) + REGNAME(SH7750_INTEVT_A7) + REGNAME(SH7750_STBCR_A7) + REGNAME(SH7750_STBCR2_A7) + REGNAME(SH7750_FRQCR_A7) + REGNAME(SH7750_WTCNT_A7) + REGNAME(SH7750_WTCSR_A7) + REGNAME(SH7750_R64CNT_A7) + REGNAME(SH7750_RSECCNT_A7) + REGNAME(SH7750_RMINCNT_A7) + REGNAME(SH7750_RHRCNT_A7) + REGNAME(SH7750_RWKCNT_A7) + REGNAME(SH7750_RDAYCNT_A7) + REGNAME(SH7750_RMONCNT_A7) + REGNAME(SH7750_RYRCNT_A7) + REGNAME(SH7750_RSECAR_A7) + REGNAME(SH7750_RMINAR_A7) + REGNAME(SH7750_RHRAR_A7) + REGNAME(SH7750_RWKAR_A7) + REGNAME(SH7750_RDAYAR_A7) + REGNAME(SH7750_RMONAR_A7) + REGNAME(SH7750_RCR1_A7) + REGNAME(SH7750_RCR2_A7) + REGNAME(SH7750_BCR1_A7) + REGNAME(SH7750_BCR2_A7) + REGNAME(SH7750_WCR1_A7) + REGNAME(SH7750_WCR2_A7) + REGNAME(SH7750_WCR3_A7) + REGNAME(SH7750_MCR_A7) + REGNAME(SH7750_PCR_A7) + REGNAME(SH7750_RTCSR_A7) + REGNAME(SH7750_RTCNT_A7) + REGNAME(SH7750_RTCOR_A7) + REGNAME(SH7750_RFCR_A7) + REGNAME(SH7750_SAR0_A7) + REGNAME(SH7750_SAR1_A7) + REGNAME(SH7750_SAR2_A7) + REGNAME(SH7750_SAR3_A7) + REGNAME(SH7750_DAR0_A7) + REGNAME(SH7750_DAR1_A7) + REGNAME(SH7750_DAR2_A7) + REGNAME(SH7750_DAR3_A7) + REGNAME(SH7750_DMATCR0_A7) + REGNAME(SH7750_DMATCR1_A7) + REGNAME(SH7750_DMATCR2_A7) + REGNAME(SH7750_DMATCR3_A7) + REGNAME(SH7750_CHCR0_A7) + REGNAME(SH7750_CHCR1_A7) + REGNAME(SH7750_CHCR2_A7) + REGNAME(SH7750_CHCR3_A7) + REGNAME(SH7750_DMAOR_A7) + REGNAME(SH7750_PCTRA_A7) + REGNAME(SH7750_PDTRA_A7) + REGNAME(SH7750_PCTRB_A7) + REGNAME(SH7750_PDTRB_A7) + REGNAME(SH7750_GPIOIC_A7) + REGNAME(SH7750_ICR_A7) + REGNAME(SH7750_BCR3_A7) + REGNAME(SH7750_BCR4_A7) + REGNAME(SH7750_SDMR2_A7) + REGNAME(SH7750_SDMR3_A7) + { (uint32_t)-1, NULL } }; const char *regname(uint32_t addr) { unsigned int i; - for (i = 0; regnames[i].regaddr != (uint32_t) - 1; i++) { - if (regnames[i].regaddr == addr) - return regnames[i].regname; + for (i = 0; regnames[i].regaddr != (uint32_t)-1; i++) { + if (regnames[i].regaddr == addr) { + return regnames[i].regname; + } } return ""; diff --git a/hw/sh4/sh7750_regs.h b/hw/sh4/sh7750_regs.h index ab073dadc7..beb571d5e9 100644 --- 
a/hw/sh4/sh7750_regs.h +++ b/hw/sh4/sh7750_regs.h @@ -43,9 +43,8 @@ * All register has 2 addresses: in 0xff000000 - 0xffffffff (P4 address) and * in 0x1f000000 - 0x1fffffff (area 7 address) */ -#define SH7750_P4_BASE 0xff000000 /* Accessible only in - privileged mode */ -#define SH7750_A7_BASE 0x1f000000 /* Accessible only using TLB */ +#define SH7750_P4_BASE 0xff000000 /* Accessible only in privileged mode */ +#define SH7750_A7_BASE 0x1f000000 /* Accessible only using TLB */ #define SH7750_P4_REG32(ofs) (SH7750_P4_BASE + (ofs)) #define SH7750_A7_REG32(ofs) (SH7750_A7_BASE + (ofs)) @@ -55,84 +54,84 @@ */ /* Page Table Entry High register - PTEH */ -#define SH7750_PTEH_REGOFS 0x000000 /* offset */ +#define SH7750_PTEH_REGOFS 0x000000 /* offset */ #define SH7750_PTEH SH7750_P4_REG32(SH7750_PTEH_REGOFS) #define SH7750_PTEH_A7 SH7750_A7_REG32(SH7750_PTEH_REGOFS) -#define SH7750_PTEH_VPN 0xfffffd00 /* Virtual page number */ +#define SH7750_PTEH_VPN 0xfffffd00 /* Virtual page number */ #define SH7750_PTEH_VPN_S 10 -#define SH7750_PTEH_ASID 0x000000ff /* Address space identifier */ +#define SH7750_PTEH_ASID 0x000000ff /* Address space identifier */ #define SH7750_PTEH_ASID_S 0 /* Page Table Entry Low register - PTEL */ -#define SH7750_PTEL_REGOFS 0x000004 /* offset */ +#define SH7750_PTEL_REGOFS 0x000004 /* offset */ #define SH7750_PTEL SH7750_P4_REG32(SH7750_PTEL_REGOFS) #define SH7750_PTEL_A7 SH7750_A7_REG32(SH7750_PTEL_REGOFS) -#define SH7750_PTEL_PPN 0x1ffffc00 /* Physical page number */ +#define SH7750_PTEL_PPN 0x1ffffc00 /* Physical page number */ #define SH7750_PTEL_PPN_S 10 -#define SH7750_PTEL_V 0x00000100 /* Validity (0-entry is invalid) */ -#define SH7750_PTEL_SZ1 0x00000080 /* Page size bit 1 */ -#define SH7750_PTEL_SZ0 0x00000010 /* Page size bit 0 */ -#define SH7750_PTEL_SZ_1KB 0x00000000 /* 1-kbyte page */ -#define SH7750_PTEL_SZ_4KB 0x00000010 /* 4-kbyte page */ -#define SH7750_PTEL_SZ_64KB 0x00000080 /* 64-kbyte page */ -#define SH7750_PTEL_SZ_1MB 0x00000090 /* 1-Mbyte page */ -#define SH7750_PTEL_PR 0x00000060 /* Protection Key Data */ -#define SH7750_PTEL_PR_ROPO 0x00000000 /* read-only in priv mode */ -#define SH7750_PTEL_PR_RWPO 0x00000020 /* read-write in priv mode */ -#define SH7750_PTEL_PR_ROPU 0x00000040 /* read-only in priv or user mode */ -#define SH7750_PTEL_PR_RWPU 0x00000060 /* read-write in priv or user mode */ -#define SH7750_PTEL_C 0x00000008 /* Cacheability - (0 - page not cacheable) */ -#define SH7750_PTEL_D 0x00000004 /* Dirty bit (1 - write has been - performed to a page) */ -#define SH7750_PTEL_SH 0x00000002 /* Share Status bit (1 - page are - shared by processes) */ -#define SH7750_PTEL_WT 0x00000001 /* Write-through bit, specifies the - cache write mode: - 0 - Copy-back mode - 1 - Write-through mode */ +#define SH7750_PTEL_V 0x00000100 /* Validity (0-entry is invalid) */ +#define SH7750_PTEL_SZ1 0x00000080 /* Page size bit 1 */ +#define SH7750_PTEL_SZ0 0x00000010 /* Page size bit 0 */ +#define SH7750_PTEL_SZ_1KB 0x00000000 /* 1-kbyte page */ +#define SH7750_PTEL_SZ_4KB 0x00000010 /* 4-kbyte page */ +#define SH7750_PTEL_SZ_64KB 0x00000080 /* 64-kbyte page */ +#define SH7750_PTEL_SZ_1MB 0x00000090 /* 1-Mbyte page */ +#define SH7750_PTEL_PR 0x00000060 /* Protection Key Data */ +#define SH7750_PTEL_PR_ROPO 0x00000000 /* read-only in priv mode */ +#define SH7750_PTEL_PR_RWPO 0x00000020 /* read-write in priv mode */ +#define SH7750_PTEL_PR_ROPU 0x00000040 /* read-only in priv or user mode */ +#define SH7750_PTEL_PR_RWPU 0x00000060 /* read-write in priv or user 
mode */ +#define SH7750_PTEL_C 0x00000008 /* Cacheability */ + /* (0 - page not cacheable) */ +#define SH7750_PTEL_D 0x00000004 /* Dirty bit (1 - write has been */ + /* performed to a page) */ +#define SH7750_PTEL_SH 0x00000002 /* Share Status bit (1 - page are */ + /* shared by processes) */ +#define SH7750_PTEL_WT 0x00000001 /* Write-through bit, specifies the */ + /* cache write mode: */ + /* 0 - Copy-back mode */ + /* 1 - Write-through mode */ /* Page Table Entry Assistance register - PTEA */ -#define SH7750_PTEA_REGOFS 0x000034 /* offset */ +#define SH7750_PTEA_REGOFS 0x000034 /* offset */ #define SH7750_PTEA SH7750_P4_REG32(SH7750_PTEA_REGOFS) #define SH7750_PTEA_A7 SH7750_A7_REG32(SH7750_PTEA_REGOFS) -#define SH7750_PTEA_TC 0x00000008 /* Timing Control bit - 0 - use area 5 wait states - 1 - use area 6 wait states */ -#define SH7750_PTEA_SA 0x00000007 /* Space Attribute bits: */ -#define SH7750_PTEA_SA_UNDEF 0x00000000 /* 0 - undefined */ -#define SH7750_PTEA_SA_IOVAR 0x00000001 /* 1 - variable-size I/O space */ -#define SH7750_PTEA_SA_IO8 0x00000002 /* 2 - 8-bit I/O space */ -#define SH7750_PTEA_SA_IO16 0x00000003 /* 3 - 16-bit I/O space */ -#define SH7750_PTEA_SA_CMEM8 0x00000004 /* 4 - 8-bit common memory space */ -#define SH7750_PTEA_SA_CMEM16 0x00000005 /* 5 - 16-bit common memory space */ -#define SH7750_PTEA_SA_AMEM8 0x00000006 /* 6 - 8-bit attr memory space */ -#define SH7750_PTEA_SA_AMEM16 0x00000007 /* 7 - 16-bit attr memory space */ +#define SH7750_PTEA_TC 0x00000008 /* Timing Control bit */ + /* 0 - use area 5 wait states */ + /* 1 - use area 6 wait states */ +#define SH7750_PTEA_SA 0x00000007 /* Space Attribute bits: */ +#define SH7750_PTEA_SA_UNDEF 0x00000000 /* 0 - undefined */ +#define SH7750_PTEA_SA_IOVAR 0x00000001 /* 1 - variable-size I/O space */ +#define SH7750_PTEA_SA_IO8 0x00000002 /* 2 - 8-bit I/O space */ +#define SH7750_PTEA_SA_IO16 0x00000003 /* 3 - 16-bit I/O space */ +#define SH7750_PTEA_SA_CMEM8 0x00000004 /* 4 - 8-bit common memory space */ +#define SH7750_PTEA_SA_CMEM16 0x00000005 /* 5 - 16-bit common memory space */ +#define SH7750_PTEA_SA_AMEM8 0x00000006 /* 6 - 8-bit attr memory space */ +#define SH7750_PTEA_SA_AMEM16 0x00000007 /* 7 - 16-bit attr memory space */ /* Translation table base register */ -#define SH7750_TTB_REGOFS 0x000008 /* offset */ +#define SH7750_TTB_REGOFS 0x000008 /* offset */ #define SH7750_TTB SH7750_P4_REG32(SH7750_TTB_REGOFS) #define SH7750_TTB_A7 SH7750_A7_REG32(SH7750_TTB_REGOFS) /* TLB exeption address register - TEA */ -#define SH7750_TEA_REGOFS 0x00000c /* offset */ +#define SH7750_TEA_REGOFS 0x00000c /* offset */ #define SH7750_TEA SH7750_P4_REG32(SH7750_TEA_REGOFS) #define SH7750_TEA_A7 SH7750_A7_REG32(SH7750_TEA_REGOFS) /* MMU control register - MMUCR */ -#define SH7750_MMUCR_REGOFS 0x000010 /* offset */ +#define SH7750_MMUCR_REGOFS 0x000010 /* offset */ #define SH7750_MMUCR SH7750_P4_REG32(SH7750_MMUCR_REGOFS) #define SH7750_MMUCR_A7 SH7750_A7_REG32(SH7750_MMUCR_REGOFS) -#define SH7750_MMUCR_AT 0x00000001 /* Address translation bit */ -#define SH7750_MMUCR_TI 0x00000004 /* TLB invalidate */ -#define SH7750_MMUCR_SV 0x00000100 /* Single Virtual Mode bit */ -#define SH7750_MMUCR_SQMD 0x00000200 /* Store Queue Mode bit */ -#define SH7750_MMUCR_URC 0x0000FC00 /* UTLB Replace Counter */ +#define SH7750_MMUCR_AT 0x00000001 /* Address translation bit */ +#define SH7750_MMUCR_TI 0x00000004 /* TLB invalidate */ +#define SH7750_MMUCR_SV 0x00000100 /* Single Virtual Mode bit */ +#define SH7750_MMUCR_SQMD 0x00000200 /* Store 
Queue Mode bit */ +#define SH7750_MMUCR_URC 0x0000FC00 /* UTLB Replace Counter */ #define SH7750_MMUCR_URC_S 10 -#define SH7750_MMUCR_URB 0x00FC0000 /* UTLB Replace Boundary */ +#define SH7750_MMUCR_URB 0x00FC0000 /* UTLB Replace Boundary */ #define SH7750_MMUCR_URB_S 18 -#define SH7750_MMUCR_LRUI 0xFC000000 /* Least Recently Used ITLB */ +#define SH7750_MMUCR_LRUI 0xFC000000 /* Least Recently Used ITLB */ #define SH7750_MMUCR_LRUI_S 26 @@ -145,30 +144,30 @@ */ /* Cache Control Register - CCR */ -#define SH7750_CCR_REGOFS 0x00001c /* offset */ +#define SH7750_CCR_REGOFS 0x00001c /* offset */ #define SH7750_CCR SH7750_P4_REG32(SH7750_CCR_REGOFS) #define SH7750_CCR_A7 SH7750_A7_REG32(SH7750_CCR_REGOFS) -#define SH7750_CCR_IIX 0x00008000 /* IC index enable bit */ -#define SH7750_CCR_ICI 0x00000800 /* IC invalidation bit: - set it to clear IC */ -#define SH7750_CCR_ICE 0x00000100 /* IC enable bit */ -#define SH7750_CCR_OIX 0x00000080 /* OC index enable bit */ -#define SH7750_CCR_ORA 0x00000020 /* OC RAM enable bit - if you set OCE = 0, - you should set ORA = 0 */ -#define SH7750_CCR_OCI 0x00000008 /* OC invalidation bit */ -#define SH7750_CCR_CB 0x00000004 /* Copy-back bit for P1 area */ -#define SH7750_CCR_WT 0x00000002 /* Write-through bit for P0,U0,P3 area */ -#define SH7750_CCR_OCE 0x00000001 /* OC enable bit */ +#define SH7750_CCR_IIX 0x00008000 /* IC index enable bit */ +#define SH7750_CCR_ICI 0x00000800 /* IC invalidation bit: */ + /* set it to clear IC */ +#define SH7750_CCR_ICE 0x00000100 /* IC enable bit */ +#define SH7750_CCR_OIX 0x00000080 /* OC index enable bit */ +#define SH7750_CCR_ORA 0x00000020 /* OC RAM enable bit */ + /* if you set OCE = 0, */ + /* you should set ORA = 0 */ +#define SH7750_CCR_OCI 0x00000008 /* OC invalidation bit */ +#define SH7750_CCR_CB 0x00000004 /* Copy-back bit for P1 area */ +#define SH7750_CCR_WT 0x00000002 /* Write-through bit for P0,U0,P3 area */ +#define SH7750_CCR_OCE 0x00000001 /* OC enable bit */ /* Queue address control register 0 - QACR0 */ -#define SH7750_QACR0_REGOFS 0x000038 /* offset */ +#define SH7750_QACR0_REGOFS 0x000038 /* offset */ #define SH7750_QACR0 SH7750_P4_REG32(SH7750_QACR0_REGOFS) #define SH7750_QACR0_A7 SH7750_A7_REG32(SH7750_QACR0_REGOFS) /* Queue address control register 1 - QACR1 */ -#define SH7750_QACR1_REGOFS 0x00003c /* offset */ +#define SH7750_QACR1_REGOFS 0x00003c /* offset */ #define SH7750_QACR1 SH7750_P4_REG32(SH7750_QACR1_REGOFS) #define SH7750_QACR1_A7 SH7750_A7_REG32(SH7750_QACR1_REGOFS) @@ -178,11 +177,11 @@ */ /* Immediate data for TRAPA instruction - TRA */ -#define SH7750_TRA_REGOFS 0x000020 /* offset */ +#define SH7750_TRA_REGOFS 0x000020 /* offset */ #define SH7750_TRA SH7750_P4_REG32(SH7750_TRA_REGOFS) #define SH7750_TRA_A7 SH7750_A7_REG32(SH7750_TRA_REGOFS) -#define SH7750_TRA_IMM 0x000003fd /* Immediate data operand */ +#define SH7750_TRA_IMM 0x000003fd /* Immediate data operand */ #define SH7750_TRA_IMM_S 2 /* Exeption event register - EXPEVT */ @@ -190,14 +189,14 @@ #define SH7750_EXPEVT SH7750_P4_REG32(SH7750_EXPEVT_REGOFS) #define SH7750_EXPEVT_A7 SH7750_A7_REG32(SH7750_EXPEVT_REGOFS) -#define SH7750_EXPEVT_EX 0x00000fff /* Exeption code */ +#define SH7750_EXPEVT_EX 0x00000fff /* Exeption code */ #define SH7750_EXPEVT_EX_S 0 /* Interrupt event register */ #define SH7750_INTEVT_REGOFS 0x000028 #define SH7750_INTEVT SH7750_P4_REG32(SH7750_INTEVT_REGOFS) #define SH7750_INTEVT_A7 SH7750_A7_REG32(SH7750_INTEVT_REGOFS) -#define SH7750_INTEVT_EX 0x00000fff /* Exeption code */ +#define SH7750_INTEVT_EX 
0x00000fff /* Exeption code */ #define SH7750_INTEVT_EX_S 0 /* @@ -206,683 +205,684 @@ #define SH7750_EVT_TO_NUM(evt) ((evt) >> 5) /* Reset exception category */ -#define SH7750_EVT_POWER_ON_RST 0x000 /* Power-on reset */ -#define SH7750_EVT_MANUAL_RST 0x020 /* Manual reset */ -#define SH7750_EVT_TLB_MULT_HIT 0x140 /* TLB multiple-hit exception */ +#define SH7750_EVT_POWER_ON_RST 0x000 /* Power-on reset */ +#define SH7750_EVT_MANUAL_RST 0x020 /* Manual reset */ +#define SH7750_EVT_TLB_MULT_HIT 0x140 /* TLB multiple-hit exception */ /* General exception category */ -#define SH7750_EVT_USER_BREAK 0x1E0 /* User break */ -#define SH7750_EVT_IADDR_ERR 0x0E0 /* Instruction address error */ -#define SH7750_EVT_TLB_READ_MISS 0x040 /* ITLB miss exception / - DTLB miss exception (read) */ -#define SH7750_EVT_TLB_READ_PROTV 0x0A0 /* ITLB protection violation / - DTLB protection violation (read) */ -#define SH7750_EVT_ILLEGAL_INSTR 0x180 /* General Illegal Instruction - exception */ -#define SH7750_EVT_SLOT_ILLEGAL_INSTR 0x1A0 /* Slot Illegal Instruction - exception */ -#define SH7750_EVT_FPU_DISABLE 0x800 /* General FPU disable exception */ -#define SH7750_EVT_SLOT_FPU_DISABLE 0x820 /* Slot FPU disable exception */ -#define SH7750_EVT_DATA_READ_ERR 0x0E0 /* Data address error (read) */ -#define SH7750_EVT_DATA_WRITE_ERR 0x100 /* Data address error (write) */ -#define SH7750_EVT_DTLB_WRITE_MISS 0x060 /* DTLB miss exception (write) */ -#define SH7750_EVT_DTLB_WRITE_PROTV 0x0C0 /* DTLB protection violation - exception (write) */ -#define SH7750_EVT_FPU_EXCEPTION 0x120 /* FPU exception */ -#define SH7750_EVT_INITIAL_PGWRITE 0x080 /* Initial Page Write exception */ -#define SH7750_EVT_TRAPA 0x160 /* Unconditional trap (TRAPA) */ +#define SH7750_EVT_USER_BREAK 0x1E0 /* User break */ +#define SH7750_EVT_IADDR_ERR 0x0E0 /* Instruction address error */ +#define SH7750_EVT_TLB_READ_MISS 0x040 /* ITLB miss exception / */ + /* DTLB miss exception (read) */ +#define SH7750_EVT_TLB_READ_PROTV 0x0A0 /* ITLB protection violation, */ + /* DTLB protection violation */ + /* (read) */ +#define SH7750_EVT_ILLEGAL_INSTR 0x180 /* General Illegal Instruction */ + /* exception */ +#define SH7750_EVT_SLOT_ILLEGAL_INSTR 0x1A0 /* Slot Illegal Instruction */ + /* exception */ +#define SH7750_EVT_FPU_DISABLE 0x800 /* General FPU disable exception */ +#define SH7750_EVT_SLOT_FPU_DISABLE 0x820 /* Slot FPU disable exception */ +#define SH7750_EVT_DATA_READ_ERR 0x0E0 /* Data address error (read) */ +#define SH7750_EVT_DATA_WRITE_ERR 0x100 /* Data address error (write) */ +#define SH7750_EVT_DTLB_WRITE_MISS 0x060 /* DTLB miss exception (write) */ +#define SH7750_EVT_DTLB_WRITE_PROTV 0x0C0 /* DTLB protection violation */ + /* exception (write) */ +#define SH7750_EVT_FPU_EXCEPTION 0x120 /* FPU exception */ +#define SH7750_EVT_INITIAL_PGWRITE 0x080 /* Initial Page Write exception */ +#define SH7750_EVT_TRAPA 0x160 /* Unconditional trap (TRAPA) */ /* Interrupt exception category */ -#define SH7750_EVT_NMI 0x1C0 /* Non-maskable interrupt */ -#define SH7750_EVT_IRQ0 0x200 /* External Interrupt 0 */ -#define SH7750_EVT_IRQ1 0x220 /* External Interrupt 1 */ -#define SH7750_EVT_IRQ2 0x240 /* External Interrupt 2 */ -#define SH7750_EVT_IRQ3 0x260 /* External Interrupt 3 */ -#define SH7750_EVT_IRQ4 0x280 /* External Interrupt 4 */ -#define SH7750_EVT_IRQ5 0x2A0 /* External Interrupt 5 */ -#define SH7750_EVT_IRQ6 0x2C0 /* External Interrupt 6 */ -#define SH7750_EVT_IRQ7 0x2E0 /* External Interrupt 7 */ -#define SH7750_EVT_IRQ8 0x300 /* External 
Interrupt 8 */ -#define SH7750_EVT_IRQ9 0x320 /* External Interrupt 9 */ -#define SH7750_EVT_IRQA 0x340 /* External Interrupt A */ -#define SH7750_EVT_IRQB 0x360 /* External Interrupt B */ -#define SH7750_EVT_IRQC 0x380 /* External Interrupt C */ -#define SH7750_EVT_IRQD 0x3A0 /* External Interrupt D */ -#define SH7750_EVT_IRQE 0x3C0 /* External Interrupt E */ +#define SH7750_EVT_NMI 0x1C0 /* Non-maskable interrupt */ +#define SH7750_EVT_IRQ0 0x200 /* External Interrupt 0 */ +#define SH7750_EVT_IRQ1 0x220 /* External Interrupt 1 */ +#define SH7750_EVT_IRQ2 0x240 /* External Interrupt 2 */ +#define SH7750_EVT_IRQ3 0x260 /* External Interrupt 3 */ +#define SH7750_EVT_IRQ4 0x280 /* External Interrupt 4 */ +#define SH7750_EVT_IRQ5 0x2A0 /* External Interrupt 5 */ +#define SH7750_EVT_IRQ6 0x2C0 /* External Interrupt 6 */ +#define SH7750_EVT_IRQ7 0x2E0 /* External Interrupt 7 */ +#define SH7750_EVT_IRQ8 0x300 /* External Interrupt 8 */ +#define SH7750_EVT_IRQ9 0x320 /* External Interrupt 9 */ +#define SH7750_EVT_IRQA 0x340 /* External Interrupt A */ +#define SH7750_EVT_IRQB 0x360 /* External Interrupt B */ +#define SH7750_EVT_IRQC 0x380 /* External Interrupt C */ +#define SH7750_EVT_IRQD 0x3A0 /* External Interrupt D */ +#define SH7750_EVT_IRQE 0x3C0 /* External Interrupt E */ /* Peripheral Module Interrupts - Timer Unit (TMU) */ -#define SH7750_EVT_TUNI0 0x400 /* TMU Underflow Interrupt 0 */ -#define SH7750_EVT_TUNI1 0x420 /* TMU Underflow Interrupt 1 */ -#define SH7750_EVT_TUNI2 0x440 /* TMU Underflow Interrupt 2 */ -#define SH7750_EVT_TICPI2 0x460 /* TMU Input Capture Interrupt 2 */ +#define SH7750_EVT_TUNI0 0x400 /* TMU Underflow Interrupt 0 */ +#define SH7750_EVT_TUNI1 0x420 /* TMU Underflow Interrupt 1 */ +#define SH7750_EVT_TUNI2 0x440 /* TMU Underflow Interrupt 2 */ +#define SH7750_EVT_TICPI2 0x460 /* TMU Input Capture Interrupt 2 */ /* Peripheral Module Interrupts - Real-Time Clock (RTC) */ -#define SH7750_EVT_RTC_ATI 0x480 /* Alarm Interrupt Request */ -#define SH7750_EVT_RTC_PRI 0x4A0 /* Periodic Interrupt Request */ -#define SH7750_EVT_RTC_CUI 0x4C0 /* Carry Interrupt Request */ +#define SH7750_EVT_RTC_ATI 0x480 /* Alarm Interrupt Request */ +#define SH7750_EVT_RTC_PRI 0x4A0 /* Periodic Interrupt Request */ +#define SH7750_EVT_RTC_CUI 0x4C0 /* Carry Interrupt Request */ /* Peripheral Module Interrupts - Serial Communication Interface (SCI) */ -#define SH7750_EVT_SCI_ERI 0x4E0 /* Receive Error */ -#define SH7750_EVT_SCI_RXI 0x500 /* Receive Data Register Full */ -#define SH7750_EVT_SCI_TXI 0x520 /* Transmit Data Register Empty */ -#define SH7750_EVT_SCI_TEI 0x540 /* Transmit End */ +#define SH7750_EVT_SCI_ERI 0x4E0 /* Receive Error */ +#define SH7750_EVT_SCI_RXI 0x500 /* Receive Data Register Full */ +#define SH7750_EVT_SCI_TXI 0x520 /* Transmit Data Register Empty */ +#define SH7750_EVT_SCI_TEI 0x540 /* Transmit End */ /* Peripheral Module Interrupts - Watchdog Timer (WDT) */ -#define SH7750_EVT_WDT_ITI 0x560 /* Interval Timer Interrupt - (used when WDT operates in - interval timer mode) */ +#define SH7750_EVT_WDT_ITI 0x560 /* Interval Timer Interrupt */ + /* (used when WDT operates in */ + /* interval timer mode) */ /* Peripheral Module Interrupts - Memory Refresh Unit (REF) */ -#define SH7750_EVT_REF_RCMI 0x580 /* Compare-match Interrupt */ -#define SH7750_EVT_REF_ROVI 0x5A0 /* Refresh Counter Overflow - interrupt */ +#define SH7750_EVT_REF_RCMI 0x580 /* Compare-match Interrupt */ +#define SH7750_EVT_REF_ROVI 0x5A0 /* Refresh Counter Overflow */ + /* interrupt */ /* Peripheral 
Module Interrupts - Hitachi User Debug Interface (H-UDI) */ -#define SH7750_EVT_HUDI 0x600 /* UDI interrupt */ +#define SH7750_EVT_HUDI 0x600 /* UDI interrupt */ /* Peripheral Module Interrupts - General-Purpose I/O (GPIO) */ -#define SH7750_EVT_GPIO 0x620 /* GPIO Interrupt */ +#define SH7750_EVT_GPIO 0x620 /* GPIO Interrupt */ /* Peripheral Module Interrupts - DMA Controller (DMAC) */ -#define SH7750_EVT_DMAC_DMTE0 0x640 /* DMAC 0 Transfer End Interrupt */ -#define SH7750_EVT_DMAC_DMTE1 0x660 /* DMAC 1 Transfer End Interrupt */ -#define SH7750_EVT_DMAC_DMTE2 0x680 /* DMAC 2 Transfer End Interrupt */ -#define SH7750_EVT_DMAC_DMTE3 0x6A0 /* DMAC 3 Transfer End Interrupt */ -#define SH7750_EVT_DMAC_DMAE 0x6C0 /* DMAC Address Error Interrupt */ +#define SH7750_EVT_DMAC_DMTE0 0x640 /* DMAC 0 Transfer End Interrupt */ +#define SH7750_EVT_DMAC_DMTE1 0x660 /* DMAC 1 Transfer End Interrupt */ +#define SH7750_EVT_DMAC_DMTE2 0x680 /* DMAC 2 Transfer End Interrupt */ +#define SH7750_EVT_DMAC_DMTE3 0x6A0 /* DMAC 3 Transfer End Interrupt */ +#define SH7750_EVT_DMAC_DMAE 0x6C0 /* DMAC Address Error Interrupt */ -/* Peripheral Module Interrupts - Serial Communication Interface with FIFO */ -/* (SCIF) */ -#define SH7750_EVT_SCIF_ERI 0x700 /* Receive Error */ -#define SH7750_EVT_SCIF_RXI 0x720 /* Receive FIFO Data Full or - Receive Data ready interrupt */ -#define SH7750_EVT_SCIF_BRI 0x740 /* Break or overrun error */ -#define SH7750_EVT_SCIF_TXI 0x760 /* Transmit FIFO Data Empty */ +/* Peripheral Module Interrupts Serial Communication Interface w/ FIFO (SCIF) */ +#define SH7750_EVT_SCIF_ERI 0x700 /* Receive Error */ +#define SH7750_EVT_SCIF_RXI 0x720 /* Receive FIFO Data Full or */ + /* Receive Data ready interrupt */ +#define SH7750_EVT_SCIF_BRI 0x740 /* Break or overrun error */ +#define SH7750_EVT_SCIF_TXI 0x760 /* Transmit FIFO Data Empty */ /* * Power Management */ -#define SH7750_STBCR_REGOFS 0xC00004 /* offset */ +#define SH7750_STBCR_REGOFS 0xC00004 /* offset */ #define SH7750_STBCR SH7750_P4_REG32(SH7750_STBCR_REGOFS) #define SH7750_STBCR_A7 SH7750_A7_REG32(SH7750_STBCR_REGOFS) -#define SH7750_STBCR_STBY 0x80 /* Specifies a transition to standby mode: - 0 - Transition to SLEEP mode on SLEEP - 1 - Transition to STANDBY mode on SLEEP */ -#define SH7750_STBCR_PHZ 0x40 /* State of peripheral module pins in - standby mode: - 0 - normal state - 1 - high-impendance state */ +#define SH7750_STBCR_STBY 0x80 /* Specifies a transition to standby mode: */ + /* 0 Transition to SLEEP mode on SLEEP */ + /* 1 Transition to STANDBY mode on SLEEP */ +#define SH7750_STBCR_PHZ 0x40 /* State of peripheral module pins in */ + /* standby mode: */ + /* 0 normal state */ + /* 1 high-impendance state */ -#define SH7750_STBCR_PPU 0x20 /* Peripheral module pins pull-up controls */ -#define SH7750_STBCR_MSTP4 0x10 /* Stopping the clock supply to DMAC */ +#define SH7750_STBCR_PPU 0x20 /* Peripheral module pins pull-up controls */ +#define SH7750_STBCR_MSTP4 0x10 /* Stopping the clock supply to DMAC */ #define SH7750_STBCR_DMAC_STP SH7750_STBCR_MSTP4 -#define SH7750_STBCR_MSTP3 0x08 /* Stopping the clock supply to SCIF */ +#define SH7750_STBCR_MSTP3 0x08 /* Stopping the clock supply to SCIF */ #define SH7750_STBCR_SCIF_STP SH7750_STBCR_MSTP3 -#define SH7750_STBCR_MSTP2 0x04 /* Stopping the clock supply to TMU */ +#define SH7750_STBCR_MSTP2 0x04 /* Stopping the clock supply to TMU */ #define SH7750_STBCR_TMU_STP SH7750_STBCR_MSTP2 -#define SH7750_STBCR_MSTP1 0x02 /* Stopping the clock supply to RTC */ +#define 
SH7750_STBCR_MSTP1 0x02 /* Stopping the clock supply to RTC */ #define SH7750_STBCR_RTC_STP SH7750_STBCR_MSTP1 -#define SH7750_STBCR_MSPT0 0x01 /* Stopping the clock supply to SCI */ +#define SH7750_STBCR_MSPT0 0x01 /* Stopping the clock supply to SCI */ #define SH7750_STBCR_SCI_STP SH7750_STBCR_MSTP0 #define SH7750_STBCR_STBY 0x80 -#define SH7750_STBCR2_REGOFS 0xC00010 /* offset */ +#define SH7750_STBCR2_REGOFS 0xC00010 /* offset */ #define SH7750_STBCR2 SH7750_P4_REG32(SH7750_STBCR2_REGOFS) #define SH7750_STBCR2_A7 SH7750_A7_REG32(SH7750_STBCR2_REGOFS) -#define SH7750_STBCR2_DSLP 0x80 /* Specifies transition to deep sleep mode: - 0 - transition to sleep or standby mode - as it is specified in STBY bit - 1 - transition to deep sleep mode on - execution of SLEEP instruction */ -#define SH7750_STBCR2_MSTP6 0x02 /* Stopping the clock supply to Store Queue - in the cache controller */ +#define SH7750_STBCR2_DSLP 0x80 /* Specifies transition to deep sleep mode */ + /* 0 transition to sleep or standby mode */ + /* as it is specified in STBY bit */ + /* 1 transition to deep sleep mode on */ + /* execution of SLEEP instruction */ +#define SH7750_STBCR2_MSTP6 0x02 /* Stopping the clock supply to the */ + /* Store Queue in the cache controller */ #define SH7750_STBCR2_SQ_STP SH7750_STBCR2_MSTP6 -#define SH7750_STBCR2_MSTP5 0x01 /* Stopping the clock supply to the User - Break Controller (UBC) */ +#define SH7750_STBCR2_MSTP5 0x01 /* Stopping the clock supply to the */ + /* User Break Controller (UBC) */ #define SH7750_STBCR2_UBC_STP SH7750_STBCR2_MSTP5 /* * Clock Pulse Generator (CPG) */ -#define SH7750_FRQCR_REGOFS 0xC00000 /* offset */ +#define SH7750_FRQCR_REGOFS 0xC00000 /* offset */ #define SH7750_FRQCR SH7750_P4_REG32(SH7750_FRQCR_REGOFS) #define SH7750_FRQCR_A7 SH7750_A7_REG32(SH7750_FRQCR_REGOFS) -#define SH7750_FRQCR_CKOEN 0x0800 /* Clock Output Enable - 0 - CKIO pin goes to HiZ/pullup - 1 - Clock is output from CKIO */ -#define SH7750_FRQCR_PLL1EN 0x0400 /* PLL circuit 1 enable */ -#define SH7750_FRQCR_PLL2EN 0x0200 /* PLL circuit 2 enable */ +#define SH7750_FRQCR_CKOEN 0x0800 /* Clock Output Enable */ + /* 0 - CKIO pin goes to HiZ/pullup */ + /* 1 - Clock is output from CKIO */ +#define SH7750_FRQCR_PLL1EN 0x0400 /* PLL circuit 1 enable */ +#define SH7750_FRQCR_PLL2EN 0x0200 /* PLL circuit 2 enable */ -#define SH7750_FRQCR_IFC 0x01C0 /* CPU clock frequency division ratio: */ -#define SH7750_FRQCR_IFCDIV1 0x0000 /* 0 - * 1 */ -#define SH7750_FRQCR_IFCDIV2 0x0040 /* 1 - * 1/2 */ -#define SH7750_FRQCR_IFCDIV3 0x0080 /* 2 - * 1/3 */ -#define SH7750_FRQCR_IFCDIV4 0x00C0 /* 3 - * 1/4 */ -#define SH7750_FRQCR_IFCDIV6 0x0100 /* 4 - * 1/6 */ -#define SH7750_FRQCR_IFCDIV8 0x0140 /* 5 - * 1/8 */ +#define SH7750_FRQCR_IFC 0x01C0 /* CPU clock frequency division ratio: */ +#define SH7750_FRQCR_IFCDIV1 0x0000 /* 0 - * 1 */ +#define SH7750_FRQCR_IFCDIV2 0x0040 /* 1 - * 1/2 */ +#define SH7750_FRQCR_IFCDIV3 0x0080 /* 2 - * 1/3 */ +#define SH7750_FRQCR_IFCDIV4 0x00C0 /* 3 - * 1/4 */ +#define SH7750_FRQCR_IFCDIV6 0x0100 /* 4 - * 1/6 */ +#define SH7750_FRQCR_IFCDIV8 0x0140 /* 5 - * 1/8 */ -#define SH7750_FRQCR_BFC 0x0038 /* Bus clock frequency division ratio: */ -#define SH7750_FRQCR_BFCDIV1 0x0000 /* 0 - * 1 */ -#define SH7750_FRQCR_BFCDIV2 0x0008 /* 1 - * 1/2 */ -#define SH7750_FRQCR_BFCDIV3 0x0010 /* 2 - * 1/3 */ -#define SH7750_FRQCR_BFCDIV4 0x0018 /* 3 - * 1/4 */ -#define SH7750_FRQCR_BFCDIV6 0x0020 /* 4 - * 1/6 */ -#define SH7750_FRQCR_BFCDIV8 0x0028 /* 5 - * 1/8 */ +#define SH7750_FRQCR_BFC 0x0038 /* 
Bus clock frequency division ratio: */ +#define SH7750_FRQCR_BFCDIV1 0x0000 /* 0 - * 1 */ +#define SH7750_FRQCR_BFCDIV2 0x0008 /* 1 - * 1/2 */ +#define SH7750_FRQCR_BFCDIV3 0x0010 /* 2 - * 1/3 */ +#define SH7750_FRQCR_BFCDIV4 0x0018 /* 3 - * 1/4 */ +#define SH7750_FRQCR_BFCDIV6 0x0020 /* 4 - * 1/6 */ +#define SH7750_FRQCR_BFCDIV8 0x0028 /* 5 - * 1/8 */ -#define SH7750_FRQCR_PFC 0x0007 /* Peripheral module clock frequency - division ratio: */ -#define SH7750_FRQCR_PFCDIV2 0x0000 /* 0 - * 1/2 */ -#define SH7750_FRQCR_PFCDIV3 0x0001 /* 1 - * 1/3 */ -#define SH7750_FRQCR_PFCDIV4 0x0002 /* 2 - * 1/4 */ -#define SH7750_FRQCR_PFCDIV6 0x0003 /* 3 - * 1/6 */ -#define SH7750_FRQCR_PFCDIV8 0x0004 /* 4 - * 1/8 */ +#define SH7750_FRQCR_PFC 0x0007 /* Peripheral module clock frequency */ + /* division ratio: */ +#define SH7750_FRQCR_PFCDIV2 0x0000 /* 0 - * 1/2 */ +#define SH7750_FRQCR_PFCDIV3 0x0001 /* 1 - * 1/3 */ +#define SH7750_FRQCR_PFCDIV4 0x0002 /* 2 - * 1/4 */ +#define SH7750_FRQCR_PFCDIV6 0x0003 /* 3 - * 1/6 */ +#define SH7750_FRQCR_PFCDIV8 0x0004 /* 4 - * 1/8 */ /* * Watchdog Timer (WDT) */ /* Watchdog Timer Counter register - WTCNT */ -#define SH7750_WTCNT_REGOFS 0xC00008 /* offset */ +#define SH7750_WTCNT_REGOFS 0xC00008 /* offset */ #define SH7750_WTCNT SH7750_P4_REG32(SH7750_WTCNT_REGOFS) #define SH7750_WTCNT_A7 SH7750_A7_REG32(SH7750_WTCNT_REGOFS) -#define SH7750_WTCNT_KEY 0x5A00 /* When WTCNT byte register written, - you have to set the upper byte to - 0x5A */ +#define SH7750_WTCNT_KEY 0x5A00 /* When WTCNT byte register written, you */ + /* have to set the upper byte to 0x5A */ /* Watchdog Timer Control/Status register - WTCSR */ -#define SH7750_WTCSR_REGOFS 0xC0000C /* offset */ +#define SH7750_WTCSR_REGOFS 0xC0000C /* offset */ #define SH7750_WTCSR SH7750_P4_REG32(SH7750_WTCSR_REGOFS) #define SH7750_WTCSR_A7 SH7750_A7_REG32(SH7750_WTCSR_REGOFS) -#define SH7750_WTCSR_KEY 0xA500 /* When WTCSR byte register written, - you have to set the upper byte to - 0xA5 */ -#define SH7750_WTCSR_TME 0x80 /* Timer enable (1-upcount start) */ -#define SH7750_WTCSR_MODE 0x40 /* Timer Mode Select: */ -#define SH7750_WTCSR_MODE_WT 0x40 /* Watchdog Timer Mode */ -#define SH7750_WTCSR_MODE_IT 0x00 /* Interval Timer Mode */ -#define SH7750_WTCSR_RSTS 0x20 /* Reset Select: */ -#define SH7750_WTCSR_RST_MAN 0x20 /* Manual Reset */ -#define SH7750_WTCSR_RST_PWR 0x00 /* Power-on Reset */ -#define SH7750_WTCSR_WOVF 0x10 /* Watchdog Timer Overflow Flag */ -#define SH7750_WTCSR_IOVF 0x08 /* Interval Timer Overflow Flag */ -#define SH7750_WTCSR_CKS 0x07 /* Clock Select: */ -#define SH7750_WTCSR_CKS_DIV32 0x00 /* 1/32 of frequency divider 2 input */ -#define SH7750_WTCSR_CKS_DIV64 0x01 /* 1/64 */ -#define SH7750_WTCSR_CKS_DIV128 0x02 /* 1/128 */ -#define SH7750_WTCSR_CKS_DIV256 0x03 /* 1/256 */ -#define SH7750_WTCSR_CKS_DIV512 0x04 /* 1/512 */ -#define SH7750_WTCSR_CKS_DIV1024 0x05 /* 1/1024 */ -#define SH7750_WTCSR_CKS_DIV2048 0x06 /* 1/2048 */ -#define SH7750_WTCSR_CKS_DIV4096 0x07 /* 1/4096 */ +#define SH7750_WTCSR_KEY 0xA500 /* When WTCSR byte register written, you */ + /* have to set the upper byte to 0xA5 */ +#define SH7750_WTCSR_TME 0x80 /* Timer enable (1-upcount start) */ +#define SH7750_WTCSR_MODE 0x40 /* Timer Mode Select: */ +#define SH7750_WTCSR_MODE_WT 0x40 /* Watchdog Timer Mode */ +#define SH7750_WTCSR_MODE_IT 0x00 /* Interval Timer Mode */ +#define SH7750_WTCSR_RSTS 0x20 /* Reset Select: */ +#define SH7750_WTCSR_RST_MAN 0x20 /* Manual Reset */ +#define SH7750_WTCSR_RST_PWR 0x00 /* Power-on Reset */ 
+#define SH7750_WTCSR_WOVF 0x10 /* Watchdog Timer Overflow Flag */ +#define SH7750_WTCSR_IOVF 0x08 /* Interval Timer Overflow Flag */ +#define SH7750_WTCSR_CKS 0x07 /* Clock Select: */ +#define SH7750_WTCSR_CKS_DIV32 0x00 /* 1/32 of frequency divider 2 input */ +#define SH7750_WTCSR_CKS_DIV64 0x01 /* 1/64 */ +#define SH7750_WTCSR_CKS_DIV128 0x02 /* 1/128 */ +#define SH7750_WTCSR_CKS_DIV256 0x03 /* 1/256 */ +#define SH7750_WTCSR_CKS_DIV512 0x04 /* 1/512 */ +#define SH7750_WTCSR_CKS_DIV1024 0x05 /* 1/1024 */ +#define SH7750_WTCSR_CKS_DIV2048 0x06 /* 1/2048 */ +#define SH7750_WTCSR_CKS_DIV4096 0x07 /* 1/4096 */ /* * Real-Time Clock (RTC) */ /* 64-Hz Counter Register (byte, read-only) - R64CNT */ -#define SH7750_R64CNT_REGOFS 0xC80000 /* offset */ +#define SH7750_R64CNT_REGOFS 0xC80000 /* offset */ #define SH7750_R64CNT SH7750_P4_REG32(SH7750_R64CNT_REGOFS) #define SH7750_R64CNT_A7 SH7750_A7_REG32(SH7750_R64CNT_REGOFS) /* Second Counter Register (byte, BCD-coded) - RSECCNT */ -#define SH7750_RSECCNT_REGOFS 0xC80004 /* offset */ +#define SH7750_RSECCNT_REGOFS 0xC80004 /* offset */ #define SH7750_RSECCNT SH7750_P4_REG32(SH7750_RSECCNT_REGOFS) #define SH7750_RSECCNT_A7 SH7750_A7_REG32(SH7750_RSECCNT_REGOFS) /* Minute Counter Register (byte, BCD-coded) - RMINCNT */ -#define SH7750_RMINCNT_REGOFS 0xC80008 /* offset */ +#define SH7750_RMINCNT_REGOFS 0xC80008 /* offset */ #define SH7750_RMINCNT SH7750_P4_REG32(SH7750_RMINCNT_REGOFS) #define SH7750_RMINCNT_A7 SH7750_A7_REG32(SH7750_RMINCNT_REGOFS) /* Hour Counter Register (byte, BCD-coded) - RHRCNT */ -#define SH7750_RHRCNT_REGOFS 0xC8000C /* offset */ +#define SH7750_RHRCNT_REGOFS 0xC8000C /* offset */ #define SH7750_RHRCNT SH7750_P4_REG32(SH7750_RHRCNT_REGOFS) #define SH7750_RHRCNT_A7 SH7750_A7_REG32(SH7750_RHRCNT_REGOFS) /* Day-of-Week Counter Register (byte) - RWKCNT */ -#define SH7750_RWKCNT_REGOFS 0xC80010 /* offset */ +#define SH7750_RWKCNT_REGOFS 0xC80010 /* offset */ #define SH7750_RWKCNT SH7750_P4_REG32(SH7750_RWKCNT_REGOFS) #define SH7750_RWKCNT_A7 SH7750_A7_REG32(SH7750_RWKCNT_REGOFS) -#define SH7750_RWKCNT_SUN 0 /* Sunday */ -#define SH7750_RWKCNT_MON 1 /* Monday */ -#define SH7750_RWKCNT_TUE 2 /* Tuesday */ -#define SH7750_RWKCNT_WED 3 /* Wednesday */ -#define SH7750_RWKCNT_THU 4 /* Thursday */ -#define SH7750_RWKCNT_FRI 5 /* Friday */ -#define SH7750_RWKCNT_SAT 6 /* Saturday */ +#define SH7750_RWKCNT_SUN 0 /* Sunday */ +#define SH7750_RWKCNT_MON 1 /* Monday */ +#define SH7750_RWKCNT_TUE 2 /* Tuesday */ +#define SH7750_RWKCNT_WED 3 /* Wednesday */ +#define SH7750_RWKCNT_THU 4 /* Thursday */ +#define SH7750_RWKCNT_FRI 5 /* Friday */ +#define SH7750_RWKCNT_SAT 6 /* Saturday */ /* Day Counter Register (byte, BCD-coded) - RDAYCNT */ -#define SH7750_RDAYCNT_REGOFS 0xC80014 /* offset */ +#define SH7750_RDAYCNT_REGOFS 0xC80014 /* offset */ #define SH7750_RDAYCNT SH7750_P4_REG32(SH7750_RDAYCNT_REGOFS) #define SH7750_RDAYCNT_A7 SH7750_A7_REG32(SH7750_RDAYCNT_REGOFS) /* Month Counter Register (byte, BCD-coded) - RMONCNT */ -#define SH7750_RMONCNT_REGOFS 0xC80018 /* offset */ +#define SH7750_RMONCNT_REGOFS 0xC80018 /* offset */ #define SH7750_RMONCNT SH7750_P4_REG32(SH7750_RMONCNT_REGOFS) #define SH7750_RMONCNT_A7 SH7750_A7_REG32(SH7750_RMONCNT_REGOFS) /* Year Counter Register (half, BCD-coded) - RYRCNT */ -#define SH7750_RYRCNT_REGOFS 0xC8001C /* offset */ +#define SH7750_RYRCNT_REGOFS 0xC8001C /* offset */ #define SH7750_RYRCNT SH7750_P4_REG32(SH7750_RYRCNT_REGOFS) #define SH7750_RYRCNT_A7 SH7750_A7_REG32(SH7750_RYRCNT_REGOFS) /* Second Alarm 
Register (byte, BCD-coded) - RSECAR */ -#define SH7750_RSECAR_REGOFS 0xC80020 /* offset */ +#define SH7750_RSECAR_REGOFS 0xC80020 /* offset */ #define SH7750_RSECAR SH7750_P4_REG32(SH7750_RSECAR_REGOFS) #define SH7750_RSECAR_A7 SH7750_A7_REG32(SH7750_RSECAR_REGOFS) -#define SH7750_RSECAR_ENB 0x80 /* Second Alarm Enable */ +#define SH7750_RSECAR_ENB 0x80 /* Second Alarm Enable */ /* Minute Alarm Register (byte, BCD-coded) - RMINAR */ -#define SH7750_RMINAR_REGOFS 0xC80024 /* offset */ +#define SH7750_RMINAR_REGOFS 0xC80024 /* offset */ #define SH7750_RMINAR SH7750_P4_REG32(SH7750_RMINAR_REGOFS) #define SH7750_RMINAR_A7 SH7750_A7_REG32(SH7750_RMINAR_REGOFS) -#define SH7750_RMINAR_ENB 0x80 /* Minute Alarm Enable */ +#define SH7750_RMINAR_ENB 0x80 /* Minute Alarm Enable */ /* Hour Alarm Register (byte, BCD-coded) - RHRAR */ -#define SH7750_RHRAR_REGOFS 0xC80028 /* offset */ +#define SH7750_RHRAR_REGOFS 0xC80028 /* offset */ #define SH7750_RHRAR SH7750_P4_REG32(SH7750_RHRAR_REGOFS) #define SH7750_RHRAR_A7 SH7750_A7_REG32(SH7750_RHRAR_REGOFS) -#define SH7750_RHRAR_ENB 0x80 /* Hour Alarm Enable */ +#define SH7750_RHRAR_ENB 0x80 /* Hour Alarm Enable */ /* Day-of-Week Alarm Register (byte) - RWKAR */ -#define SH7750_RWKAR_REGOFS 0xC8002C /* offset */ +#define SH7750_RWKAR_REGOFS 0xC8002C /* offset */ #define SH7750_RWKAR SH7750_P4_REG32(SH7750_RWKAR_REGOFS) #define SH7750_RWKAR_A7 SH7750_A7_REG32(SH7750_RWKAR_REGOFS) -#define SH7750_RWKAR_ENB 0x80 /* Day-of-week Alarm Enable */ +#define SH7750_RWKAR_ENB 0x80 /* Day-of-week Alarm Enable */ -#define SH7750_RWKAR_SUN 0 /* Sunday */ -#define SH7750_RWKAR_MON 1 /* Monday */ -#define SH7750_RWKAR_TUE 2 /* Tuesday */ -#define SH7750_RWKAR_WED 3 /* Wednesday */ -#define SH7750_RWKAR_THU 4 /* Thursday */ -#define SH7750_RWKAR_FRI 5 /* Friday */ -#define SH7750_RWKAR_SAT 6 /* Saturday */ +#define SH7750_RWKAR_SUN 0 /* Sunday */ +#define SH7750_RWKAR_MON 1 /* Monday */ +#define SH7750_RWKAR_TUE 2 /* Tuesday */ +#define SH7750_RWKAR_WED 3 /* Wednesday */ +#define SH7750_RWKAR_THU 4 /* Thursday */ +#define SH7750_RWKAR_FRI 5 /* Friday */ +#define SH7750_RWKAR_SAT 6 /* Saturday */ /* Day Alarm Register (byte, BCD-coded) - RDAYAR */ -#define SH7750_RDAYAR_REGOFS 0xC80030 /* offset */ +#define SH7750_RDAYAR_REGOFS 0xC80030 /* offset */ #define SH7750_RDAYAR SH7750_P4_REG32(SH7750_RDAYAR_REGOFS) #define SH7750_RDAYAR_A7 SH7750_A7_REG32(SH7750_RDAYAR_REGOFS) -#define SH7750_RDAYAR_ENB 0x80 /* Day Alarm Enable */ +#define SH7750_RDAYAR_ENB 0x80 /* Day Alarm Enable */ /* Month Counter Register (byte, BCD-coded) - RMONAR */ -#define SH7750_RMONAR_REGOFS 0xC80034 /* offset */ +#define SH7750_RMONAR_REGOFS 0xC80034 /* offset */ #define SH7750_RMONAR SH7750_P4_REG32(SH7750_RMONAR_REGOFS) #define SH7750_RMONAR_A7 SH7750_A7_REG32(SH7750_RMONAR_REGOFS) -#define SH7750_RMONAR_ENB 0x80 /* Month Alarm Enable */ +#define SH7750_RMONAR_ENB 0x80 /* Month Alarm Enable */ /* RTC Control Register 1 (byte) - RCR1 */ -#define SH7750_RCR1_REGOFS 0xC80038 /* offset */ +#define SH7750_RCR1_REGOFS 0xC80038 /* offset */ #define SH7750_RCR1 SH7750_P4_REG32(SH7750_RCR1_REGOFS) #define SH7750_RCR1_A7 SH7750_A7_REG32(SH7750_RCR1_REGOFS) -#define SH7750_RCR1_CF 0x80 /* Carry Flag */ -#define SH7750_RCR1_CIE 0x10 /* Carry Interrupt Enable */ -#define SH7750_RCR1_AIE 0x08 /* Alarm Interrupt Enable */ -#define SH7750_RCR1_AF 0x01 /* Alarm Flag */ +#define SH7750_RCR1_CF 0x80 /* Carry Flag */ +#define SH7750_RCR1_CIE 0x10 /* Carry Interrupt Enable */ +#define SH7750_RCR1_AIE 0x08 /* Alarm Interrupt 
Enable */ +#define SH7750_RCR1_AF 0x01 /* Alarm Flag */ /* RTC Control Register 2 (byte) - RCR2 */ -#define SH7750_RCR2_REGOFS 0xC8003C /* offset */ +#define SH7750_RCR2_REGOFS 0xC8003C /* offset */ #define SH7750_RCR2 SH7750_P4_REG32(SH7750_RCR2_REGOFS) #define SH7750_RCR2_A7 SH7750_A7_REG32(SH7750_RCR2_REGOFS) -#define SH7750_RCR2_PEF 0x80 /* Periodic Interrupt Flag */ -#define SH7750_RCR2_PES 0x70 /* Periodic Interrupt Enable: */ -#define SH7750_RCR2_PES_DIS 0x00 /* Periodic Interrupt Disabled */ -#define SH7750_RCR2_PES_DIV256 0x10 /* Generated at 1/256 sec interval */ -#define SH7750_RCR2_PES_DIV64 0x20 /* Generated at 1/64 sec interval */ -#define SH7750_RCR2_PES_DIV16 0x30 /* Generated at 1/16 sec interval */ -#define SH7750_RCR2_PES_DIV4 0x40 /* Generated at 1/4 sec interval */ -#define SH7750_RCR2_PES_DIV2 0x50 /* Generated at 1/2 sec interval */ -#define SH7750_RCR2_PES_x1 0x60 /* Generated at 1 sec interval */ -#define SH7750_RCR2_PES_x2 0x70 /* Generated at 2 sec interval */ -#define SH7750_RCR2_RTCEN 0x08 /* RTC Crystal Oscillator is Operated */ -#define SH7750_RCR2_ADJ 0x04 /* 30-Second Adjastment */ -#define SH7750_RCR2_RESET 0x02 /* Frequency divider circuits are reset */ -#define SH7750_RCR2_START 0x01 /* 0 - sec, min, hr, day-of-week, month, - year counters are stopped - 1 - sec, min, hr, day-of-week, month, - year counters operate normally */ +#define SH7750_RCR2_PEF 0x80 /* Periodic Interrupt Flag */ +#define SH7750_RCR2_PES 0x70 /* Periodic Interrupt Enable: */ +#define SH7750_RCR2_PES_DIS 0x00 /* Periodic Interrupt Disabled */ +#define SH7750_RCR2_PES_DIV256 0x10 /* Generated at 1/256 sec interval */ +#define SH7750_RCR2_PES_DIV64 0x20 /* Generated at 1/64 sec interval */ +#define SH7750_RCR2_PES_DIV16 0x30 /* Generated at 1/16 sec interval */ +#define SH7750_RCR2_PES_DIV4 0x40 /* Generated at 1/4 sec interval */ +#define SH7750_RCR2_PES_DIV2 0x50 /* Generated at 1/2 sec interval */ +#define SH7750_RCR2_PES_x1 0x60 /* Generated at 1 sec interval */ +#define SH7750_RCR2_PES_x2 0x70 /* Generated at 2 sec interval */ +#define SH7750_RCR2_RTCEN 0x08 /* RTC Crystal Oscillator is Operated */ +#define SH7750_RCR2_ADJ 0x04 /* 30-Second Adjastment */ +#define SH7750_RCR2_RESET 0x02 /* Frequency divider circuits are reset */ +#define SH7750_RCR2_START 0x01 /* 0 - sec, min, hr, day-of-week, month, */ + /* year counters are stopped */ + /* 1 - sec, min, hr, day-of-week, month, */ + /* year counters operate normally */ /* * Bus State Controller - BSC */ /* Bus Control Register 1 - BCR1 */ -#define SH7750_BCR1_REGOFS 0x800000 /* offset */ +#define SH7750_BCR1_REGOFS 0x800000 /* offset */ #define SH7750_BCR1 SH7750_P4_REG32(SH7750_BCR1_REGOFS) #define SH7750_BCR1_A7 SH7750_A7_REG32(SH7750_BCR1_REGOFS) -#define SH7750_BCR1_ENDIAN 0x80000000 /* Endianness (1 - little endian) */ -#define SH7750_BCR1_MASTER 0x40000000 /* Master/Slave mode (1-master) */ -#define SH7750_BCR1_A0MPX 0x20000000 /* Area 0 Memory Type (0-SRAM,1-MPX) */ -#define SH7750_BCR1_IPUP 0x02000000 /* Input Pin Pull-up Control: - 0 - pull-up resistor is on for - control input pins - 1 - pull-up resistor is off */ -#define SH7750_BCR1_OPUP 0x01000000 /* Output Pin Pull-up Control: - 0 - pull-up resistor is on for - control output pins - 1 - pull-up resistor is off */ -#define SH7750_BCR1_A1MBC 0x00200000 /* Area 1 SRAM Byte Control Mode: - 0 - Area 1 SRAM is set to - normal mode - 1 - Area 1 SRAM is set to byte - control mode */ -#define SH7750_BCR1_A4MBC 0x00100000 /* Area 4 SRAM Byte Control Mode: - 0 - Area 4 SRAM is 
set to - normal mode - 1 - Area 4 SRAM is set to byte - control mode */ -#define SH7750_BCR1_BREQEN 0x00080000 /* BREQ Enable: - 0 - External requests are not - accepted - 1 - External requests are - accepted */ -#define SH7750_BCR1_PSHR 0x00040000 /* Partial Sharing Bit: - 0 - Master Mode - 1 - Partial-sharing Mode */ -#define SH7750_BCR1_MEMMPX 0x00020000 /* Area 1 to 6 MPX Interface: - 0 - SRAM/burst ROM interface - 1 - MPX interface */ -#define SH7750_BCR1_HIZMEM 0x00008000 /* High Impendance Control. Specifies - the state of A[25:0], BS\, CSn\, - RD/WR\, CE2A\, CE2B\ in standby - mode and when bus is released: - 0 - signals go to High-Z mode - 1 - signals driven */ -#define SH7750_BCR1_HIZCNT 0x00004000 /* High Impendance Control. Specifies - the state of the RAS\, RAS2\, WEn\, - CASn\, DQMn, RD\, CASS\, FRAME\, - RD2\ signals in standby mode and - when bus is released: - 0 - signals go to High-Z mode - 1 - signals driven */ -#define SH7750_BCR1_A0BST 0x00003800 /* Area 0 Burst ROM Control */ -#define SH7750_BCR1_A0BST_SRAM 0x0000 /* Area 0 accessed as SRAM i/f */ -#define SH7750_BCR1_A0BST_ROM4 0x0800 /* Area 0 accessed as burst ROM - interface, 4 cosequtive access */ -#define SH7750_BCR1_A0BST_ROM8 0x1000 /* Area 0 accessed as burst ROM - interface, 8 cosequtive access */ -#define SH7750_BCR1_A0BST_ROM16 0x1800 /* Area 0 accessed as burst ROM - interface, 16 cosequtive access */ -#define SH7750_BCR1_A0BST_ROM32 0x2000 /* Area 0 accessed as burst ROM - interface, 32 cosequtive access */ +#define SH7750_BCR1_ENDIAN 0x80000000 /* Endianness (1 - little endian) */ +#define SH7750_BCR1_MASTER 0x40000000 /* Master/Slave mode (1-master) */ +#define SH7750_BCR1_A0MPX 0x20000000 /* Area 0 Memory Type (0-SRAM,1-MPX) */ +#define SH7750_BCR1_IPUP 0x02000000 /* Input Pin Pull-up Control: */ + /* 0 - pull-up resistor is on for */ + /* control input pins */ + /* 1 - pull-up resistor is off */ +#define SH7750_BCR1_OPUP 0x01000000 /* Output Pin Pull-up Control: */ + /* 0 - pull-up resistor is on for */ + /* control output pins */ + /* 1 - pull-up resistor is off */ +#define SH7750_BCR1_A1MBC 0x00200000 /* Area 1 SRAM Byte Control Mode: */ + /* 0 - Area 1 SRAM is set to */ + /* normal mode */ + /* 1 - Area 1 SRAM is set to byte */ + /* control mode */ +#define SH7750_BCR1_A4MBC 0x00100000 /* Area 4 SRAM Byte Control Mode: */ + /* 0 - Area 4 SRAM is set to */ + /* normal mode */ + /* 1 - Area 4 SRAM is set to byte */ + /* control mode */ +#define SH7750_BCR1_BREQEN 0x00080000 /* BREQ Enable: */ + /* 0 - External requests are not */ + /* accepted */ + /* 1 - External requests are */ + /* accepted */ +#define SH7750_BCR1_PSHR 0x00040000 /* Partial Sharing Bit: */ + /* 0 - Master Mode */ + /* 1 - Partial-sharing Mode */ +#define SH7750_BCR1_MEMMPX 0x00020000 /* Area 1 to 6 MPX Interface: */ + /* 0 - SRAM/burst ROM interface */ + /* 1 - MPX interface */ +#define SH7750_BCR1_HIZMEM 0x00008000 /* High Impendance Control. */ + /* Specifies the state of A[25:0], */ + /* BS\, CSn\, RD/WR\, CE2A\, CE2B\ */ + /* in standby mode and when bus is */ + /* released: */ + /* 0 - signals go to High-Z mode */ + /* 1 - signals driven */ +#define SH7750_BCR1_HIZCNT 0x00004000 /* High Impendance Control. 
*/ + /* Specifies the state of the */ + /* RAS\, RAS2\, WEn\, CASn\, DQMn, */ + /* RD\, CASS\, FRAME\, RD2\ */ + /* signals in standby mode and */ + /* when bus is released: */ + /* 0 - signals go to High-Z mode */ + /* 1 - signals driven */ +#define SH7750_BCR1_A0BST 0x00003800 /* Area 0 Burst ROM Control */ +#define SH7750_BCR1_A0BST_SRAM 0x0000 /* Area 0 accessed as SRAM i/f */ +#define SH7750_BCR1_A0BST_ROM4 0x0800 /* Area 0 accessed as burst ROM */ + /* interface, 4 cosequtive access */ +#define SH7750_BCR1_A0BST_ROM8 0x1000 /* Area 0 accessed as burst ROM */ + /* interface, 8 cosequtive access */ +#define SH7750_BCR1_A0BST_ROM16 0x1800 /* Area 0 accessed as burst ROM */ + /* interface, 16 cosequtive access */ +#define SH7750_BCR1_A0BST_ROM32 0x2000 /* Area 0 accessed as burst ROM */ + /* interface, 32 cosequtive access */ -#define SH7750_BCR1_A5BST 0x00000700 /* Area 5 Burst ROM Control */ -#define SH7750_BCR1_A5BST_SRAM 0x0000 /* Area 5 accessed as SRAM i/f */ -#define SH7750_BCR1_A5BST_ROM4 0x0100 /* Area 5 accessed as burst ROM - interface, 4 cosequtive access */ -#define SH7750_BCR1_A5BST_ROM8 0x0200 /* Area 5 accessed as burst ROM - interface, 8 cosequtive access */ -#define SH7750_BCR1_A5BST_ROM16 0x0300 /* Area 5 accessed as burst ROM - interface, 16 cosequtive access */ -#define SH7750_BCR1_A5BST_ROM32 0x0400 /* Area 5 accessed as burst ROM - interface, 32 cosequtive access */ +#define SH7750_BCR1_A5BST 0x00000700 /* Area 5 Burst ROM Control */ +#define SH7750_BCR1_A5BST_SRAM 0x0000 /* Area 5 accessed as SRAM i/f */ +#define SH7750_BCR1_A5BST_ROM4 0x0100 /* Area 5 accessed as burst ROM */ + /* interface, 4 cosequtive access */ +#define SH7750_BCR1_A5BST_ROM8 0x0200 /* Area 5 accessed as burst ROM */ + /* interface, 8 cosequtive access */ +#define SH7750_BCR1_A5BST_ROM16 0x0300 /* Area 5 accessed as burst ROM */ + /* interface, 16 cosequtive access */ +#define SH7750_BCR1_A5BST_ROM32 0x0400 /* Area 5 accessed as burst ROM */ + /* interface, 32 cosequtive access */ -#define SH7750_BCR1_A6BST 0x000000E0 /* Area 6 Burst ROM Control */ -#define SH7750_BCR1_A6BST_SRAM 0x0000 /* Area 6 accessed as SRAM i/f */ -#define SH7750_BCR1_A6BST_ROM4 0x0020 /* Area 6 accessed as burst ROM - interface, 4 cosequtive access */ -#define SH7750_BCR1_A6BST_ROM8 0x0040 /* Area 6 accessed as burst ROM - interface, 8 cosequtive access */ -#define SH7750_BCR1_A6BST_ROM16 0x0060 /* Area 6 accessed as burst ROM - interface, 16 cosequtive access */ -#define SH7750_BCR1_A6BST_ROM32 0x0080 /* Area 6 accessed as burst ROM - interface, 32 cosequtive access */ +#define SH7750_BCR1_A6BST 0x000000E0 /* Area 6 Burst ROM Control */ +#define SH7750_BCR1_A6BST_SRAM 0x0000 /* Area 6 accessed as SRAM i/f */ +#define SH7750_BCR1_A6BST_ROM4 0x0020 /* Area 6 accessed as burst ROM */ + /* interface, 4 cosequtive access */ +#define SH7750_BCR1_A6BST_ROM8 0x0040 /* Area 6 accessed as burst ROM */ + /* interface, 8 cosequtive access */ +#define SH7750_BCR1_A6BST_ROM16 0x0060 /* Area 6 accessed as burst ROM */ + /* interface, 16 cosequtive access */ +#define SH7750_BCR1_A6BST_ROM32 0x0080 /* Area 6 accessed as burst ROM */ + /* interface, 32 cosequtive access */ -#define SH7750_BCR1_DRAMTP 0x001C /* Area 2 and 3 Memory Type */ -#define SH7750_BCR1_DRAMTP_2SRAM_3SRAM 0x0000 /* Area 2 and 3 are SRAM or MPX - interface. 
*/ -#define SH7750_BCR1_DRAMTP_2SRAM_3SDRAM 0x0008 /* Area 2 - SRAM/MPX, Area 3 - - synchronous DRAM */ -#define SH7750_BCR1_DRAMTP_2SDRAM_3SDRAM 0x000C /* Area 2 and 3 are synchronous - DRAM interface */ -#define SH7750_BCR1_DRAMTP_2SRAM_3DRAM 0x0010 /* Area 2 - SRAM/MPX, Area 3 - - DRAM interface */ -#define SH7750_BCR1_DRAMTP_2DRAM_3DRAM 0x0014 /* Area 2 and 3 are DRAM - interface */ +#define SH7750_BCR1_DRAMTP 0x001C /* Area 2 and 3 Memory Type */ +#define SH7750_BCR1_DRAMTP_2SRAM_3SRAM 0x0000 /* Area 2 and 3 are SRAM or */ + /* MPX interface. */ +#define SH7750_BCR1_DRAMTP_2SRAM_3SDRAM 0x0008 /* Area 2 - SRAM/MPX, Area 3 */ + /* synchronous DRAM */ +#define SH7750_BCR1_DRAMTP_2SDRAM_3SDRAM 0x000C /* Area 2 and 3 are */ + /* synchronous DRAM interface */ +#define SH7750_BCR1_DRAMTP_2SRAM_3DRAM 0x0010 /* Area 2 - SRAM/MPX, Area 3 */ + /* DRAM interface */ +#define SH7750_BCR1_DRAMTP_2DRAM_3DRAM 0x0014 /* Area 2 and 3 are DRAM */ + /* interface */ -#define SH7750_BCR1_A56PCM 0x00000001 /* Area 5 and 6 Bus Type: - 0 - SRAM interface - 1 - PCMCIA interface */ +#define SH7750_BCR1_A56PCM 0x00000001 /* Area 5 and 6 Bus Type: */ + /* 0 - SRAM interface */ + /* 1 - PCMCIA interface */ /* Bus Control Register 2 (half) - BCR2 */ -#define SH7750_BCR2_REGOFS 0x800004 /* offset */ +#define SH7750_BCR2_REGOFS 0x800004 /* offset */ #define SH7750_BCR2 SH7750_P4_REG32(SH7750_BCR2_REGOFS) #define SH7750_BCR2_A7 SH7750_A7_REG32(SH7750_BCR2_REGOFS) -#define SH7750_BCR2_A0SZ 0xC000 /* Area 0 Bus Width */ +#define SH7750_BCR2_A0SZ 0xC000 /* Area 0 Bus Width */ #define SH7750_BCR2_A0SZ_S 14 -#define SH7750_BCR2_A6SZ 0x3000 /* Area 6 Bus Width */ +#define SH7750_BCR2_A6SZ 0x3000 /* Area 6 Bus Width */ #define SH7750_BCR2_A6SZ_S 12 -#define SH7750_BCR2_A5SZ 0x0C00 /* Area 5 Bus Width */ +#define SH7750_BCR2_A5SZ 0x0C00 /* Area 5 Bus Width */ #define SH7750_BCR2_A5SZ_S 10 -#define SH7750_BCR2_A4SZ 0x0300 /* Area 4 Bus Width */ +#define SH7750_BCR2_A4SZ 0x0300 /* Area 4 Bus Width */ #define SH7750_BCR2_A4SZ_S 8 -#define SH7750_BCR2_A3SZ 0x00C0 /* Area 3 Bus Width */ +#define SH7750_BCR2_A3SZ 0x00C0 /* Area 3 Bus Width */ #define SH7750_BCR2_A3SZ_S 6 -#define SH7750_BCR2_A2SZ 0x0030 /* Area 2 Bus Width */ +#define SH7750_BCR2_A2SZ 0x0030 /* Area 2 Bus Width */ #define SH7750_BCR2_A2SZ_S 4 -#define SH7750_BCR2_A1SZ 0x000C /* Area 1 Bus Width */ +#define SH7750_BCR2_A1SZ 0x000C /* Area 1 Bus Width */ #define SH7750_BCR2_A1SZ_S 2 -#define SH7750_BCR2_SZ_64 0 /* 64 bits */ -#define SH7750_BCR2_SZ_8 1 /* 8 bits */ -#define SH7750_BCR2_SZ_16 2 /* 16 bits */ -#define SH7750_BCR2_SZ_32 3 /* 32 bits */ -#define SH7750_BCR2_PORTEN 0x0001 /* Port Function Enable : - 0 - D51-D32 are not used as a port - 1 - D51-D32 are used as a port */ +#define SH7750_BCR2_SZ_64 0 /* 64 bits */ +#define SH7750_BCR2_SZ_8 1 /* 8 bits */ +#define SH7750_BCR2_SZ_16 2 /* 16 bits */ +#define SH7750_BCR2_SZ_32 3 /* 32 bits */ +#define SH7750_BCR2_PORTEN 0x0001 /* Port Function Enable */ + /* 0 - D51-D32 are not used as a port */ + /* 1 - D51-D32 are used as a port */ /* Wait Control Register 1 - WCR1 */ -#define SH7750_WCR1_REGOFS 0x800008 /* offset */ +#define SH7750_WCR1_REGOFS 0x800008 /* offset */ #define SH7750_WCR1 SH7750_P4_REG32(SH7750_WCR1_REGOFS) #define SH7750_WCR1_A7 SH7750_A7_REG32(SH7750_WCR1_REGOFS) -#define SH7750_WCR1_DMAIW 0x70000000 /* DACK Device Inter-Cycle Idle - specification */ +#define SH7750_WCR1_DMAIW 0x70000000 /* DACK Device Inter-Cycle Idle */ + /* specification */ #define SH7750_WCR1_DMAIW_S 28 -#define 
SH7750_WCR1_A6IW 0x07000000 /* Area 6 Inter-Cycle Idle spec. */ +#define SH7750_WCR1_A6IW 0x07000000 /* Area 6 Inter-Cycle Idle spec. */ #define SH7750_WCR1_A6IW_S 24 -#define SH7750_WCR1_A5IW 0x00700000 /* Area 5 Inter-Cycle Idle spec. */ +#define SH7750_WCR1_A5IW 0x00700000 /* Area 5 Inter-Cycle Idle spec. */ #define SH7750_WCR1_A5IW_S 20 -#define SH7750_WCR1_A4IW 0x00070000 /* Area 4 Inter-Cycle Idle spec. */ +#define SH7750_WCR1_A4IW 0x00070000 /* Area 4 Inter-Cycle Idle spec. */ #define SH7750_WCR1_A4IW_S 16 -#define SH7750_WCR1_A3IW 0x00007000 /* Area 3 Inter-Cycle Idle spec. */ +#define SH7750_WCR1_A3IW 0x00007000 /* Area 3 Inter-Cycle Idle spec. */ #define SH7750_WCR1_A3IW_S 12 -#define SH7750_WCR1_A2IW 0x00000700 /* Area 2 Inter-Cycle Idle spec. */ +#define SH7750_WCR1_A2IW 0x00000700 /* Area 2 Inter-Cycle Idle spec. */ #define SH7750_WCR1_A2IW_S 8 -#define SH7750_WCR1_A1IW 0x00000070 /* Area 1 Inter-Cycle Idle spec. */ +#define SH7750_WCR1_A1IW 0x00000070 /* Area 1 Inter-Cycle Idle spec. */ #define SH7750_WCR1_A1IW_S 4 -#define SH7750_WCR1_A0IW 0x00000007 /* Area 0 Inter-Cycle Idle spec. */ +#define SH7750_WCR1_A0IW 0x00000007 /* Area 0 Inter-Cycle Idle spec. */ #define SH7750_WCR1_A0IW_S 0 /* Wait Control Register 2 - WCR2 */ -#define SH7750_WCR2_REGOFS 0x80000C /* offset */ +#define SH7750_WCR2_REGOFS 0x80000C /* offset */ #define SH7750_WCR2 SH7750_P4_REG32(SH7750_WCR2_REGOFS) #define SH7750_WCR2_A7 SH7750_A7_REG32(SH7750_WCR2_REGOFS) -#define SH7750_WCR2_A6W 0xE0000000 /* Area 6 Wait Control */ +#define SH7750_WCR2_A6W 0xE0000000 /* Area 6 Wait Control */ #define SH7750_WCR2_A6W_S 29 -#define SH7750_WCR2_A6B 0x1C000000 /* Area 6 Burst Pitch */ +#define SH7750_WCR2_A6B 0x1C000000 /* Area 6 Burst Pitch */ #define SH7750_WCR2_A6B_S 26 -#define SH7750_WCR2_A5W 0x03800000 /* Area 5 Wait Control */ +#define SH7750_WCR2_A5W 0x03800000 /* Area 5 Wait Control */ #define SH7750_WCR2_A5W_S 23 -#define SH7750_WCR2_A5B 0x00700000 /* Area 5 Burst Pitch */ +#define SH7750_WCR2_A5B 0x00700000 /* Area 5 Burst Pitch */ #define SH7750_WCR2_A5B_S 20 -#define SH7750_WCR2_A4W 0x000E0000 /* Area 4 Wait Control */ +#define SH7750_WCR2_A4W 0x000E0000 /* Area 4 Wait Control */ #define SH7750_WCR2_A4W_S 17 -#define SH7750_WCR2_A3W 0x0000E000 /* Area 3 Wait Control */ +#define SH7750_WCR2_A3W 0x0000E000 /* Area 3 Wait Control */ #define SH7750_WCR2_A3W_S 13 -#define SH7750_WCR2_A2W 0x00000E00 /* Area 2 Wait Control */ +#define SH7750_WCR2_A2W 0x00000E00 /* Area 2 Wait Control */ #define SH7750_WCR2_A2W_S 9 -#define SH7750_WCR2_A1W 0x000001C0 /* Area 1 Wait Control */ +#define SH7750_WCR2_A1W 0x000001C0 /* Area 1 Wait Control */ #define SH7750_WCR2_A1W_S 6 -#define SH7750_WCR2_A0W 0x00000038 /* Area 0 Wait Control */ +#define SH7750_WCR2_A0W 0x00000038 /* Area 0 Wait Control */ #define SH7750_WCR2_A0W_S 3 -#define SH7750_WCR2_A0B 0x00000007 /* Area 0 Burst Pitch */ +#define SH7750_WCR2_A0B 0x00000007 /* Area 0 Burst Pitch */ #define SH7750_WCR2_A0B_S 0 -#define SH7750_WCR2_WS0 0 /* 0 wait states inserted */ -#define SH7750_WCR2_WS1 1 /* 1 wait states inserted */ -#define SH7750_WCR2_WS2 2 /* 2 wait states inserted */ -#define SH7750_WCR2_WS3 3 /* 3 wait states inserted */ -#define SH7750_WCR2_WS6 4 /* 6 wait states inserted */ -#define SH7750_WCR2_WS9 5 /* 9 wait states inserted */ -#define SH7750_WCR2_WS12 6 /* 12 wait states inserted */ -#define SH7750_WCR2_WS15 7 /* 15 wait states inserted */ +#define SH7750_WCR2_WS0 0 /* 0 wait states inserted */ +#define SH7750_WCR2_WS1 1 /* 1 wait states inserted 
*/ +#define SH7750_WCR2_WS2 2 /* 2 wait states inserted */ +#define SH7750_WCR2_WS3 3 /* 3 wait states inserted */ +#define SH7750_WCR2_WS6 4 /* 6 wait states inserted */ +#define SH7750_WCR2_WS9 5 /* 9 wait states inserted */ +#define SH7750_WCR2_WS12 6 /* 12 wait states inserted */ +#define SH7750_WCR2_WS15 7 /* 15 wait states inserted */ -#define SH7750_WCR2_BPWS0 0 /* 0 wait states inserted from 2nd access */ -#define SH7750_WCR2_BPWS1 1 /* 1 wait states inserted from 2nd access */ -#define SH7750_WCR2_BPWS2 2 /* 2 wait states inserted from 2nd access */ -#define SH7750_WCR2_BPWS3 3 /* 3 wait states inserted from 2nd access */ -#define SH7750_WCR2_BPWS4 4 /* 4 wait states inserted from 2nd access */ -#define SH7750_WCR2_BPWS5 5 /* 5 wait states inserted from 2nd access */ -#define SH7750_WCR2_BPWS6 6 /* 6 wait states inserted from 2nd access */ -#define SH7750_WCR2_BPWS7 7 /* 7 wait states inserted from 2nd access */ +#define SH7750_WCR2_BPWS0 0 /* 0 wait states inserted from 2nd access */ +#define SH7750_WCR2_BPWS1 1 /* 1 wait states inserted from 2nd access */ +#define SH7750_WCR2_BPWS2 2 /* 2 wait states inserted from 2nd access */ +#define SH7750_WCR2_BPWS3 3 /* 3 wait states inserted from 2nd access */ +#define SH7750_WCR2_BPWS4 4 /* 4 wait states inserted from 2nd access */ +#define SH7750_WCR2_BPWS5 5 /* 5 wait states inserted from 2nd access */ +#define SH7750_WCR2_BPWS6 6 /* 6 wait states inserted from 2nd access */ +#define SH7750_WCR2_BPWS7 7 /* 7 wait states inserted from 2nd access */ /* DRAM CAS\ Assertion Delay (area 3,2) */ -#define SH7750_WCR2_DRAM_CAS_ASW1 0 /* 1 cycle */ -#define SH7750_WCR2_DRAM_CAS_ASW2 1 /* 2 cycles */ -#define SH7750_WCR2_DRAM_CAS_ASW3 2 /* 3 cycles */ -#define SH7750_WCR2_DRAM_CAS_ASW4 3 /* 4 cycles */ -#define SH7750_WCR2_DRAM_CAS_ASW7 4 /* 7 cycles */ -#define SH7750_WCR2_DRAM_CAS_ASW10 5 /* 10 cycles */ -#define SH7750_WCR2_DRAM_CAS_ASW13 6 /* 13 cycles */ -#define SH7750_WCR2_DRAM_CAS_ASW16 7 /* 16 cycles */ +#define SH7750_WCR2_DRAM_CAS_ASW1 0 /* 1 cycle */ +#define SH7750_WCR2_DRAM_CAS_ASW2 1 /* 2 cycles */ +#define SH7750_WCR2_DRAM_CAS_ASW3 2 /* 3 cycles */ +#define SH7750_WCR2_DRAM_CAS_ASW4 3 /* 4 cycles */ +#define SH7750_WCR2_DRAM_CAS_ASW7 4 /* 7 cycles */ +#define SH7750_WCR2_DRAM_CAS_ASW10 5 /* 10 cycles */ +#define SH7750_WCR2_DRAM_CAS_ASW13 6 /* 13 cycles */ +#define SH7750_WCR2_DRAM_CAS_ASW16 7 /* 16 cycles */ /* SDRAM CAS\ Latency Cycles */ -#define SH7750_WCR2_SDRAM_CAS_LAT1 1 /* 1 cycle */ -#define SH7750_WCR2_SDRAM_CAS_LAT2 2 /* 2 cycles */ -#define SH7750_WCR2_SDRAM_CAS_LAT3 3 /* 3 cycles */ -#define SH7750_WCR2_SDRAM_CAS_LAT4 4 /* 4 cycles */ -#define SH7750_WCR2_SDRAM_CAS_LAT5 5 /* 5 cycles */ +#define SH7750_WCR2_SDRAM_CAS_LAT1 1 /* 1 cycle */ +#define SH7750_WCR2_SDRAM_CAS_LAT2 2 /* 2 cycles */ +#define SH7750_WCR2_SDRAM_CAS_LAT3 3 /* 3 cycles */ +#define SH7750_WCR2_SDRAM_CAS_LAT4 4 /* 4 cycles */ +#define SH7750_WCR2_SDRAM_CAS_LAT5 5 /* 5 cycles */ /* Wait Control Register 3 - WCR3 */ -#define SH7750_WCR3_REGOFS 0x800010 /* offset */ +#define SH7750_WCR3_REGOFS 0x800010 /* offset */ #define SH7750_WCR3 SH7750_P4_REG32(SH7750_WCR3_REGOFS) #define SH7750_WCR3_A7 SH7750_A7_REG32(SH7750_WCR3_REGOFS) -#define SH7750_WCR3_A6S 0x04000000 /* Area 6 Write Strobe Setup time */ -#define SH7750_WCR3_A6H 0x03000000 /* Area 6 Data Hold Time */ +#define SH7750_WCR3_A6S 0x04000000 /* Area 6 Write Strobe Setup time */ +#define SH7750_WCR3_A6H 0x03000000 /* Area 6 Data Hold Time */ #define SH7750_WCR3_A6H_S 24 -#define SH7750_WCR3_A5S 
0x00400000 /* Area 5 Write Strobe Setup time */ -#define SH7750_WCR3_A5H 0x00300000 /* Area 5 Data Hold Time */ +#define SH7750_WCR3_A5S 0x00400000 /* Area 5 Write Strobe Setup time */ +#define SH7750_WCR3_A5H 0x00300000 /* Area 5 Data Hold Time */ #define SH7750_WCR3_A5H_S 20 -#define SH7750_WCR3_A4S 0x00040000 /* Area 4 Write Strobe Setup time */ -#define SH7750_WCR3_A4H 0x00030000 /* Area 4 Data Hold Time */ +#define SH7750_WCR3_A4S 0x00040000 /* Area 4 Write Strobe Setup time */ +#define SH7750_WCR3_A4H 0x00030000 /* Area 4 Data Hold Time */ #define SH7750_WCR3_A4H_S 16 -#define SH7750_WCR3_A3S 0x00004000 /* Area 3 Write Strobe Setup time */ -#define SH7750_WCR3_A3H 0x00003000 /* Area 3 Data Hold Time */ +#define SH7750_WCR3_A3S 0x00004000 /* Area 3 Write Strobe Setup time */ +#define SH7750_WCR3_A3H 0x00003000 /* Area 3 Data Hold Time */ #define SH7750_WCR3_A3H_S 12 -#define SH7750_WCR3_A2S 0x00000400 /* Area 2 Write Strobe Setup time */ -#define SH7750_WCR3_A2H 0x00000300 /* Area 2 Data Hold Time */ +#define SH7750_WCR3_A2S 0x00000400 /* Area 2 Write Strobe Setup time */ +#define SH7750_WCR3_A2H 0x00000300 /* Area 2 Data Hold Time */ #define SH7750_WCR3_A2H_S 8 -#define SH7750_WCR3_A1S 0x00000040 /* Area 1 Write Strobe Setup time */ -#define SH7750_WCR3_A1H 0x00000030 /* Area 1 Data Hold Time */ +#define SH7750_WCR3_A1S 0x00000040 /* Area 1 Write Strobe Setup time */ +#define SH7750_WCR3_A1H 0x00000030 /* Area 1 Data Hold Time */ #define SH7750_WCR3_A1H_S 4 -#define SH7750_WCR3_A0S 0x00000004 /* Area 0 Write Strobe Setup time */ -#define SH7750_WCR3_A0H 0x00000003 /* Area 0 Data Hold Time */ +#define SH7750_WCR3_A0S 0x00000004 /* Area 0 Write Strobe Setup time */ +#define SH7750_WCR3_A0H 0x00000003 /* Area 0 Data Hold Time */ #define SH7750_WCR3_A0H_S 0 -#define SH7750_WCR3_DHWS_0 0 /* 0 wait states data hold time */ -#define SH7750_WCR3_DHWS_1 1 /* 1 wait states data hold time */ -#define SH7750_WCR3_DHWS_2 2 /* 2 wait states data hold time */ -#define SH7750_WCR3_DHWS_3 3 /* 3 wait states data hold time */ +#define SH7750_WCR3_DHWS_0 0 /* 0 wait states data hold time */ +#define SH7750_WCR3_DHWS_1 1 /* 1 wait states data hold time */ +#define SH7750_WCR3_DHWS_2 2 /* 2 wait states data hold time */ +#define SH7750_WCR3_DHWS_3 3 /* 3 wait states data hold time */ -#define SH7750_MCR_REGOFS 0x800014 /* offset */ +#define SH7750_MCR_REGOFS 0x800014 /* offset */ #define SH7750_MCR SH7750_P4_REG32(SH7750_MCR_REGOFS) #define SH7750_MCR_A7 SH7750_A7_REG32(SH7750_MCR_REGOFS) -#define SH7750_MCR_RASD 0x80000000 /* RAS Down mode */ -#define SH7750_MCR_MRSET 0x40000000 /* SDRAM Mode Register Set */ -#define SH7750_MCR_PALL 0x00000000 /* SDRAM Precharge All cmd. Mode */ -#define SH7750_MCR_TRC 0x38000000 /* RAS Precharge Time at End of - Refresh: */ -#define SH7750_MCR_TRC_0 0x00000000 /* 0 */ -#define SH7750_MCR_TRC_3 0x08000000 /* 3 */ -#define SH7750_MCR_TRC_6 0x10000000 /* 6 */ -#define SH7750_MCR_TRC_9 0x18000000 /* 9 */ -#define SH7750_MCR_TRC_12 0x20000000 /* 12 */ -#define SH7750_MCR_TRC_15 0x28000000 /* 15 */ -#define SH7750_MCR_TRC_18 0x30000000 /* 18 */ -#define SH7750_MCR_TRC_21 0x38000000 /* 21 */ +#define SH7750_MCR_RASD 0x80000000 /* RAS Down mode */ +#define SH7750_MCR_MRSET 0x40000000 /* SDRAM Mode Register Set */ +#define SH7750_MCR_PALL 0x00000000 /* SDRAM Precharge All cmd. 
Mode */ +#define SH7750_MCR_TRC 0x38000000 /* RAS Precharge Time at End of */ + /* Refresh: */ +#define SH7750_MCR_TRC_0 0x00000000 /* 0 */ +#define SH7750_MCR_TRC_3 0x08000000 /* 3 */ +#define SH7750_MCR_TRC_6 0x10000000 /* 6 */ +#define SH7750_MCR_TRC_9 0x18000000 /* 9 */ +#define SH7750_MCR_TRC_12 0x20000000 /* 12 */ +#define SH7750_MCR_TRC_15 0x28000000 /* 15 */ +#define SH7750_MCR_TRC_18 0x30000000 /* 18 */ +#define SH7750_MCR_TRC_21 0x38000000 /* 21 */ -#define SH7750_MCR_TCAS 0x00800000 /* CAS Negation Period */ -#define SH7750_MCR_TCAS_1 0x00000000 /* 1 */ -#define SH7750_MCR_TCAS_2 0x00800000 /* 2 */ +#define SH7750_MCR_TCAS 0x00800000 /* CAS Negation Period */ +#define SH7750_MCR_TCAS_1 0x00000000 /* 1 */ +#define SH7750_MCR_TCAS_2 0x00800000 /* 2 */ -#define SH7750_MCR_TPC 0x00380000 /* DRAM: RAS Precharge Period - SDRAM: minimum number of cycles - until the next bank active cmd - is output after precharging */ +#define SH7750_MCR_TPC 0x00380000 /* DRAM: RAS Precharge Period */ + /* SDRAM: minimum number of cycles */ + /* until the next bank active cmd */ + /* is output after precharging */ #define SH7750_MCR_TPC_S 19 -#define SH7750_MCR_TPC_SDRAM_1 0x00000000 /* 1 cycle */ -#define SH7750_MCR_TPC_SDRAM_2 0x00080000 /* 2 cycles */ -#define SH7750_MCR_TPC_SDRAM_3 0x00100000 /* 3 cycles */ -#define SH7750_MCR_TPC_SDRAM_4 0x00180000 /* 4 cycles */ -#define SH7750_MCR_TPC_SDRAM_5 0x00200000 /* 5 cycles */ -#define SH7750_MCR_TPC_SDRAM_6 0x00280000 /* 6 cycles */ -#define SH7750_MCR_TPC_SDRAM_7 0x00300000 /* 7 cycles */ -#define SH7750_MCR_TPC_SDRAM_8 0x00380000 /* 8 cycles */ +#define SH7750_MCR_TPC_SDRAM_1 0x00000000 /* 1 cycle */ +#define SH7750_MCR_TPC_SDRAM_2 0x00080000 /* 2 cycles */ +#define SH7750_MCR_TPC_SDRAM_3 0x00100000 /* 3 cycles */ +#define SH7750_MCR_TPC_SDRAM_4 0x00180000 /* 4 cycles */ +#define SH7750_MCR_TPC_SDRAM_5 0x00200000 /* 5 cycles */ +#define SH7750_MCR_TPC_SDRAM_6 0x00280000 /* 6 cycles */ +#define SH7750_MCR_TPC_SDRAM_7 0x00300000 /* 7 cycles */ +#define SH7750_MCR_TPC_SDRAM_8 0x00380000 /* 8 cycles */ -#define SH7750_MCR_RCD 0x00030000 /* DRAM: RAS-CAS Assertion Delay time - SDRAM: bank active-read/write cmd - delay time */ -#define SH7750_MCR_RCD_DRAM_2 0x00000000 /* DRAM delay 2 clocks */ -#define SH7750_MCR_RCD_DRAM_3 0x00010000 /* DRAM delay 3 clocks */ -#define SH7750_MCR_RCD_DRAM_4 0x00020000 /* DRAM delay 4 clocks */ -#define SH7750_MCR_RCD_DRAM_5 0x00030000 /* DRAM delay 5 clocks */ -#define SH7750_MCR_RCD_SDRAM_2 0x00010000 /* DRAM delay 2 clocks */ -#define SH7750_MCR_RCD_SDRAM_3 0x00020000 /* DRAM delay 3 clocks */ -#define SH7750_MCR_RCD_SDRAM_4 0x00030000 /* DRAM delay 4 clocks */ +#define SH7750_MCR_RCD 0x00030000 /* DRAM: RAS-CAS Assertion Delay */ + /* time */ + /* SDRAM: bank active-read/write */ + /* command delay time */ +#define SH7750_MCR_RCD_DRAM_2 0x00000000 /* DRAM delay 2 clocks */ +#define SH7750_MCR_RCD_DRAM_3 0x00010000 /* DRAM delay 3 clocks */ +#define SH7750_MCR_RCD_DRAM_4 0x00020000 /* DRAM delay 4 clocks */ +#define SH7750_MCR_RCD_DRAM_5 0x00030000 /* DRAM delay 5 clocks */ +#define SH7750_MCR_RCD_SDRAM_2 0x00010000 /* DRAM delay 2 clocks */ +#define SH7750_MCR_RCD_SDRAM_3 0x00020000 /* DRAM delay 3 clocks */ +#define SH7750_MCR_RCD_SDRAM_4 0x00030000 /* DRAM delay 4 clocks */ -#define SH7750_MCR_TRWL 0x0000E000 /* SDRAM Write Precharge Delay */ -#define SH7750_MCR_TRWL_1 0x00000000 /* 1 */ -#define SH7750_MCR_TRWL_2 0x00002000 /* 2 */ -#define SH7750_MCR_TRWL_3 0x00004000 /* 3 */ -#define SH7750_MCR_TRWL_4 0x00006000 /* 
4 */ -#define SH7750_MCR_TRWL_5 0x00008000 /* 5 */ +#define SH7750_MCR_TRWL 0x0000E000 /* SDRAM Write Precharge Delay */ +#define SH7750_MCR_TRWL_1 0x00000000 /* 1 */ +#define SH7750_MCR_TRWL_2 0x00002000 /* 2 */ +#define SH7750_MCR_TRWL_3 0x00004000 /* 3 */ +#define SH7750_MCR_TRWL_4 0x00006000 /* 4 */ +#define SH7750_MCR_TRWL_5 0x00008000 /* 5 */ -#define SH7750_MCR_TRAS 0x00001C00 /* DRAM: CAS-Before-RAS Refresh RAS - asserting period - SDRAM: Command interval after - synchronous DRAM refresh */ -#define SH7750_MCR_TRAS_DRAM_2 0x00000000 /* 2 */ -#define SH7750_MCR_TRAS_DRAM_3 0x00000400 /* 3 */ -#define SH7750_MCR_TRAS_DRAM_4 0x00000800 /* 4 */ -#define SH7750_MCR_TRAS_DRAM_5 0x00000C00 /* 5 */ -#define SH7750_MCR_TRAS_DRAM_6 0x00001000 /* 6 */ -#define SH7750_MCR_TRAS_DRAM_7 0x00001400 /* 7 */ -#define SH7750_MCR_TRAS_DRAM_8 0x00001800 /* 8 */ -#define SH7750_MCR_TRAS_DRAM_9 0x00001C00 /* 9 */ +#define SH7750_MCR_TRAS 0x00001C00 /* DRAM: CAS-Before-RAS Refresh RAS */ + /* asserting period */ + /* SDRAM: Command interval after */ + /* synchronous DRAM refresh */ +#define SH7750_MCR_TRAS_DRAM_2 0x00000000 /* 2 */ +#define SH7750_MCR_TRAS_DRAM_3 0x00000400 /* 3 */ +#define SH7750_MCR_TRAS_DRAM_4 0x00000800 /* 4 */ +#define SH7750_MCR_TRAS_DRAM_5 0x00000C00 /* 5 */ +#define SH7750_MCR_TRAS_DRAM_6 0x00001000 /* 6 */ +#define SH7750_MCR_TRAS_DRAM_7 0x00001400 /* 7 */ +#define SH7750_MCR_TRAS_DRAM_8 0x00001800 /* 8 */ +#define SH7750_MCR_TRAS_DRAM_9 0x00001C00 /* 9 */ -#define SH7750_MCR_TRAS_SDRAM_TRC_4 0x00000000 /* 4 + TRC */ -#define SH7750_MCR_TRAS_SDRAM_TRC_5 0x00000400 /* 5 + TRC */ -#define SH7750_MCR_TRAS_SDRAM_TRC_6 0x00000800 /* 6 + TRC */ -#define SH7750_MCR_TRAS_SDRAM_TRC_7 0x00000C00 /* 7 + TRC */ -#define SH7750_MCR_TRAS_SDRAM_TRC_8 0x00001000 /* 8 + TRC */ -#define SH7750_MCR_TRAS_SDRAM_TRC_9 0x00001400 /* 9 + TRC */ -#define SH7750_MCR_TRAS_SDRAM_TRC_10 0x00001800 /* 10 + TRC */ -#define SH7750_MCR_TRAS_SDRAM_TRC_11 0x00001C00 /* 11 + TRC */ +#define SH7750_MCR_TRAS_SDRAM_TRC_4 0x00000000 /* 4 + TRC */ +#define SH7750_MCR_TRAS_SDRAM_TRC_5 0x00000400 /* 5 + TRC */ +#define SH7750_MCR_TRAS_SDRAM_TRC_6 0x00000800 /* 6 + TRC */ +#define SH7750_MCR_TRAS_SDRAM_TRC_7 0x00000C00 /* 7 + TRC */ +#define SH7750_MCR_TRAS_SDRAM_TRC_8 0x00001000 /* 8 + TRC */ +#define SH7750_MCR_TRAS_SDRAM_TRC_9 0x00001400 /* 9 + TRC */ +#define SH7750_MCR_TRAS_SDRAM_TRC_10 0x00001800 /* 10 + TRC */ +#define SH7750_MCR_TRAS_SDRAM_TRC_11 0x00001C00 /* 11 + TRC */ -#define SH7750_MCR_BE 0x00000200 /* Burst Enable */ -#define SH7750_MCR_SZ 0x00000180 /* Memory Data Size */ -#define SH7750_MCR_SZ_64 0x00000000 /* 64 bits */ -#define SH7750_MCR_SZ_16 0x00000100 /* 16 bits */ -#define SH7750_MCR_SZ_32 0x00000180 /* 32 bits */ +#define SH7750_MCR_BE 0x00000200 /* Burst Enable */ +#define SH7750_MCR_SZ 0x00000180 /* Memory Data Size */ +#define SH7750_MCR_SZ_64 0x00000000 /* 64 bits */ +#define SH7750_MCR_SZ_16 0x00000100 /* 16 bits */ +#define SH7750_MCR_SZ_32 0x00000180 /* 32 bits */ -#define SH7750_MCR_AMX 0x00000078 /* Address Multiplexing */ +#define SH7750_MCR_AMX 0x00000078 /* Address Multiplexing */ #define SH7750_MCR_AMX_S 3 -#define SH7750_MCR_AMX_DRAM_8BIT_COL 0x00000000 /* 8-bit column addr */ -#define SH7750_MCR_AMX_DRAM_9BIT_COL 0x00000008 /* 9-bit column addr */ -#define SH7750_MCR_AMX_DRAM_10BIT_COL 0x00000010 /* 10-bit column addr */ -#define SH7750_MCR_AMX_DRAM_11BIT_COL 0x00000018 /* 11-bit column addr */ -#define SH7750_MCR_AMX_DRAM_12BIT_COL 0x00000020 /* 12-bit column addr */ +#define 
SH7750_MCR_AMX_DRAM_8BIT_COL 0x00000000 /* 8-bit column addr */ +#define SH7750_MCR_AMX_DRAM_9BIT_COL 0x00000008 /* 9-bit column addr */ +#define SH7750_MCR_AMX_DRAM_10BIT_COL 0x00000010 /* 10-bit column addr */ +#define SH7750_MCR_AMX_DRAM_11BIT_COL 0x00000018 /* 11-bit column addr */ +#define SH7750_MCR_AMX_DRAM_12BIT_COL 0x00000020 /* 12-bit column addr */ /* See SH7750 Hardware Manual for SDRAM address multiplexor selection */ -#define SH7750_MCR_RFSH 0x00000004 /* Refresh Control */ -#define SH7750_MCR_RMODE 0x00000002 /* Refresh Mode: */ -#define SH7750_MCR_RMODE_NORMAL 0x00000000 /* Normal Refresh Mode */ -#define SH7750_MCR_RMODE_SELF 0x00000002 /* Self-Refresh Mode */ -#define SH7750_MCR_RMODE_EDO 0x00000001 /* EDO Mode */ +#define SH7750_MCR_RFSH 0x00000004 /* Refresh Control */ +#define SH7750_MCR_RMODE 0x00000002 /* Refresh Mode: */ +#define SH7750_MCR_RMODE_NORMAL 0x00000000 /* Normal Refresh Mode */ +#define SH7750_MCR_RMODE_SELF 0x00000002 /* Self-Refresh Mode */ +#define SH7750_MCR_RMODE_EDO 0x00000001 /* EDO Mode */ /* SDRAM Mode Set address */ #define SH7750_SDRAM_MODE_A2_BASE 0xFF900000 @@ -894,119 +894,119 @@ /* PCMCIA Control Register (half) - PCR */ -#define SH7750_PCR_REGOFS 0x800018 /* offset */ +#define SH7750_PCR_REGOFS 0x800018 /* offset */ #define SH7750_PCR SH7750_P4_REG32(SH7750_PCR_REGOFS) #define SH7750_PCR_A7 SH7750_A7_REG32(SH7750_PCR_REGOFS) -#define SH7750_PCR_A5PCW 0xC000 /* Area 5 PCMCIA Wait - Number of wait - states to be added to the number of - waits specified by WCR2 in a low-speed - PCMCIA wait cycle */ -#define SH7750_PCR_A5PCW_0 0x0000 /* 0 waits inserted */ -#define SH7750_PCR_A5PCW_15 0x4000 /* 15 waits inserted */ -#define SH7750_PCR_A5PCW_30 0x8000 /* 30 waits inserted */ -#define SH7750_PCR_A5PCW_50 0xC000 /* 50 waits inserted */ +#define SH7750_PCR_A5PCW 0xC000 /* Area 5 PCMCIA Wait - Number of wait */ + /* states to be added to the number of */ + /* waits specified by WCR2 in a */ + /* low-speed PCMCIA wait cycle */ +#define SH7750_PCR_A5PCW_0 0x0000 /* 0 waits inserted */ +#define SH7750_PCR_A5PCW_15 0x4000 /* 15 waits inserted */ +#define SH7750_PCR_A5PCW_30 0x8000 /* 30 waits inserted */ +#define SH7750_PCR_A5PCW_50 0xC000 /* 50 waits inserted */ -#define SH7750_PCR_A6PCW 0x3000 /* Area 6 PCMCIA Wait - Number of wait - states to be added to the number of - waits specified by WCR2 in a low-speed - PCMCIA wait cycle */ -#define SH7750_PCR_A6PCW_0 0x0000 /* 0 waits inserted */ -#define SH7750_PCR_A6PCW_15 0x1000 /* 15 waits inserted */ -#define SH7750_PCR_A6PCW_30 0x2000 /* 30 waits inserted */ -#define SH7750_PCR_A6PCW_50 0x3000 /* 50 waits inserted */ +#define SH7750_PCR_A6PCW 0x3000 /* Area 6 PCMCIA Wait - Number of wait */ + /* states to be added to the number of */ + /* waits specified by WCR2 in a */ + /* low-speed PCMCIA wait cycle */ +#define SH7750_PCR_A6PCW_0 0x0000 /* 0 waits inserted */ +#define SH7750_PCR_A6PCW_15 0x1000 /* 15 waits inserted */ +#define SH7750_PCR_A6PCW_30 0x2000 /* 30 waits inserted */ +#define SH7750_PCR_A6PCW_50 0x3000 /* 50 waits inserted */ -#define SH7750_PCR_A5TED 0x0E00 /* Area 5 Address-OE\/WE\ Assertion Delay, - delay time from address output to - OE\/WE\ assertion on the connected - PCMCIA interface */ +#define SH7750_PCR_A5TED 0x0E00 /* Area 5 Addr-OE\/WE\ Assertion Delay */ + /* delay time from address output to */ + /* OE\/WE\ assertion on the connected */ + /* PCMCIA interface */ #define SH7750_PCR_A5TED_S 9 -#define SH7750_PCR_A6TED 0x01C0 /* Area 6 Address-OE\/WE\ Assertion Delay */ +#define 
SH7750_PCR_A6TED 0x01C0 /* Area 6 Addr-OE\/WE\ Assertion Delay */ #define SH7750_PCR_A6TED_S 6 -#define SH7750_PCR_TED_0WS 0 /* 0 Waits inserted */ -#define SH7750_PCR_TED_1WS 1 /* 1 Waits inserted */ -#define SH7750_PCR_TED_2WS 2 /* 2 Waits inserted */ -#define SH7750_PCR_TED_3WS 3 /* 3 Waits inserted */ -#define SH7750_PCR_TED_6WS 4 /* 6 Waits inserted */ -#define SH7750_PCR_TED_9WS 5 /* 9 Waits inserted */ -#define SH7750_PCR_TED_12WS 6 /* 12 Waits inserted */ -#define SH7750_PCR_TED_15WS 7 /* 15 Waits inserted */ +#define SH7750_PCR_TED_0WS 0 /* 0 Waits inserted */ +#define SH7750_PCR_TED_1WS 1 /* 1 Waits inserted */ +#define SH7750_PCR_TED_2WS 2 /* 2 Waits inserted */ +#define SH7750_PCR_TED_3WS 3 /* 3 Waits inserted */ +#define SH7750_PCR_TED_6WS 4 /* 6 Waits inserted */ +#define SH7750_PCR_TED_9WS 5 /* 9 Waits inserted */ +#define SH7750_PCR_TED_12WS 6 /* 12 Waits inserted */ +#define SH7750_PCR_TED_15WS 7 /* 15 Waits inserted */ -#define SH7750_PCR_A5TEH 0x0038 /* Area 5 OE\/WE\ Negation Address delay, - address hold delay time from OE\/WE\ - negation in a write on the connected - PCMCIA interface */ +#define SH7750_PCR_A5TEH 0x0038 /* Area 5 OE\/WE\ Negation Addr delay, */ + /* address hold delay time from OE\/WE\ */ + /* negation in a write on the connected */ + /* PCMCIA interface */ #define SH7750_PCR_A5TEH_S 3 -#define SH7750_PCR_A6TEH 0x0007 /* Area 6 OE\/WE\ Negation Address delay */ +#define SH7750_PCR_A6TEH 0x0007 /* Area 6 OE\/WE\ Negation Address delay */ #define SH7750_PCR_A6TEH_S 0 -#define SH7750_PCR_TEH_0WS 0 /* 0 Waits inserted */ -#define SH7750_PCR_TEH_1WS 1 /* 1 Waits inserted */ -#define SH7750_PCR_TEH_2WS 2 /* 2 Waits inserted */ -#define SH7750_PCR_TEH_3WS 3 /* 3 Waits inserted */ -#define SH7750_PCR_TEH_6WS 4 /* 6 Waits inserted */ -#define SH7750_PCR_TEH_9WS 5 /* 9 Waits inserted */ -#define SH7750_PCR_TEH_12WS 6 /* 12 Waits inserted */ -#define SH7750_PCR_TEH_15WS 7 /* 15 Waits inserted */ +#define SH7750_PCR_TEH_0WS 0 /* 0 Waits inserted */ +#define SH7750_PCR_TEH_1WS 1 /* 1 Waits inserted */ +#define SH7750_PCR_TEH_2WS 2 /* 2 Waits inserted */ +#define SH7750_PCR_TEH_3WS 3 /* 3 Waits inserted */ +#define SH7750_PCR_TEH_6WS 4 /* 6 Waits inserted */ +#define SH7750_PCR_TEH_9WS 5 /* 9 Waits inserted */ +#define SH7750_PCR_TEH_12WS 6 /* 12 Waits inserted */ +#define SH7750_PCR_TEH_15WS 7 /* 15 Waits inserted */ /* Refresh Timer Control/Status Register (half) - RTSCR */ -#define SH7750_RTCSR_REGOFS 0x80001C /* offset */ +#define SH7750_RTCSR_REGOFS 0x80001C /* offset */ #define SH7750_RTCSR SH7750_P4_REG32(SH7750_RTCSR_REGOFS) #define SH7750_RTCSR_A7 SH7750_A7_REG32(SH7750_RTCSR_REGOFS) -#define SH7750_RTCSR_KEY 0xA500 /* RTCSR write key */ -#define SH7750_RTCSR_CMF 0x0080 /* Compare-Match Flag (indicates a - match between the refresh timer - counter and refresh time constant) */ -#define SH7750_RTCSR_CMIE 0x0040 /* Compare-Match Interrupt Enable */ -#define SH7750_RTCSR_CKS 0x0038 /* Refresh Counter Clock Selects */ -#define SH7750_RTCSR_CKS_DIS 0x0000 /* Clock Input Disabled */ -#define SH7750_RTCSR_CKS_CKIO_DIV4 0x0008 /* Bus Clock / 4 */ -#define SH7750_RTCSR_CKS_CKIO_DIV16 0x0010 /* Bus Clock / 16 */ -#define SH7750_RTCSR_CKS_CKIO_DIV64 0x0018 /* Bus Clock / 64 */ -#define SH7750_RTCSR_CKS_CKIO_DIV256 0x0020 /* Bus Clock / 256 */ -#define SH7750_RTCSR_CKS_CKIO_DIV1024 0x0028 /* Bus Clock / 1024 */ -#define SH7750_RTCSR_CKS_CKIO_DIV2048 0x0030 /* Bus Clock / 2048 */ -#define SH7750_RTCSR_CKS_CKIO_DIV4096 0x0038 /* Bus Clock / 4096 */ +#define 
SH7750_RTCSR_KEY 0xA500 /* RTCSR write key */ +#define SH7750_RTCSR_CMF 0x0080 /* Compare-Match Flag (indicates a */ + /* match between the refresh timer */ + /* counter and refresh time constant) */ +#define SH7750_RTCSR_CMIE 0x0040 /* Compare-Match Interrupt Enable */ +#define SH7750_RTCSR_CKS 0x0038 /* Refresh Counter Clock Selects */ +#define SH7750_RTCSR_CKS_DIS 0x0000 /* Clock Input Disabled */ +#define SH7750_RTCSR_CKS_CKIO_DIV4 0x0008 /* Bus Clock / 4 */ +#define SH7750_RTCSR_CKS_CKIO_DIV16 0x0010 /* Bus Clock / 16 */ +#define SH7750_RTCSR_CKS_CKIO_DIV64 0x0018 /* Bus Clock / 64 */ +#define SH7750_RTCSR_CKS_CKIO_DIV256 0x0020 /* Bus Clock / 256 */ +#define SH7750_RTCSR_CKS_CKIO_DIV1024 0x0028 /* Bus Clock / 1024 */ +#define SH7750_RTCSR_CKS_CKIO_DIV2048 0x0030 /* Bus Clock / 2048 */ +#define SH7750_RTCSR_CKS_CKIO_DIV4096 0x0038 /* Bus Clock / 4096 */ -#define SH7750_RTCSR_OVF 0x0004 /* Refresh Count Overflow Flag */ -#define SH7750_RTCSR_OVIE 0x0002 /* Refresh Count Overflow Interrupt - Enable */ -#define SH7750_RTCSR_LMTS 0x0001 /* Refresh Count Overflow Limit Select */ -#define SH7750_RTCSR_LMTS_1024 0x0000 /* Count Limit is 1024 */ -#define SH7750_RTCSR_LMTS_512 0x0001 /* Count Limit is 512 */ +#define SH7750_RTCSR_OVF 0x0004 /* Refresh Count Overflow Flag */ +#define SH7750_RTCSR_OVIE 0x0002 /* Refresh Count Overflow Interrupt */ + /* Enable */ +#define SH7750_RTCSR_LMTS 0x0001 /* Refresh Count Overflow Limit Select */ +#define SH7750_RTCSR_LMTS_1024 0x0000 /* Count Limit is 1024 */ +#define SH7750_RTCSR_LMTS_512 0x0001 /* Count Limit is 512 */ /* Refresh Timer Counter (half) - RTCNT */ -#define SH7750_RTCNT_REGOFS 0x800020 /* offset */ +#define SH7750_RTCNT_REGOFS 0x800020 /* offset */ #define SH7750_RTCNT SH7750_P4_REG32(SH7750_RTCNT_REGOFS) #define SH7750_RTCNT_A7 SH7750_A7_REG32(SH7750_RTCNT_REGOFS) -#define SH7750_RTCNT_KEY 0xA500 /* RTCNT write key */ +#define SH7750_RTCNT_KEY 0xA500 /* RTCNT write key */ /* Refresh Time Constant Register (half) - RTCOR */ -#define SH7750_RTCOR_REGOFS 0x800024 /* offset */ +#define SH7750_RTCOR_REGOFS 0x800024 /* offset */ #define SH7750_RTCOR SH7750_P4_REG32(SH7750_RTCOR_REGOFS) #define SH7750_RTCOR_A7 SH7750_A7_REG32(SH7750_RTCOR_REGOFS) -#define SH7750_RTCOR_KEY 0xA500 /* RTCOR write key */ +#define SH7750_RTCOR_KEY 0xA500 /* RTCOR write key */ /* Refresh Count Register (half) - RFCR */ -#define SH7750_RFCR_REGOFS 0x800028 /* offset */ +#define SH7750_RFCR_REGOFS 0x800028 /* offset */ #define SH7750_RFCR SH7750_P4_REG32(SH7750_RFCR_REGOFS) #define SH7750_RFCR_A7 SH7750_A7_REG32(SH7750_RFCR_REGOFS) -#define SH7750_RFCR_KEY 0xA400 /* RFCR write key */ +#define SH7750_RFCR_KEY 0xA400 /* RFCR write key */ /* Synchronous DRAM mode registers - SDMR */ -#define SH7750_SDMR2_REGOFS 0x900000 /* base offset */ -#define SH7750_SDMR2_REGNB 0x0FFC /* nb of register */ +#define SH7750_SDMR2_REGOFS 0x900000 /* base offset */ +#define SH7750_SDMR2_REGNB 0x0FFC /* nb of register */ #define SH7750_SDMR2 SH7750_P4_REG32(SH7750_SDMR2_REGOFS) #define SH7750_SDMR2_A7 SH7750_A7_REG32(SH7750_SDMR2_REGOFS) -#define SH7750_SDMR3_REGOFS 0x940000 /* offset */ -#define SH7750_SDMR3_REGNB 0x0FFC /* nb of register */ +#define SH7750_SDMR3_REGOFS 0x940000 /* offset */ +#define SH7750_SDMR3_REGNB 0x0FFC /* nb of register */ #define SH7750_SDMR3 SH7750_P4_REG32(SH7750_SDMR3_REGOFS) #define SH7750_SDMR3_A7 SH7750_A7_REG32(SH7750_SDMR3_REGOFS) @@ -1015,7 +1015,7 @@ */ /* DMA Source Address Register - SAR0, SAR1, SAR2, SAR3 */ -#define SH7750_SAR_REGOFS(n) (0xA00000 + 
((n)*16)) /* offset */ +#define SH7750_SAR_REGOFS(n) (0xA00000 + ((n) * 16)) /* offset */ #define SH7750_SAR(n) SH7750_P4_REG32(SH7750_SAR_REGOFS(n)) #define SH7750_SAR_A7(n) SH7750_A7_REG32(SH7750_SAR_REGOFS(n)) #define SH7750_SAR0 SH7750_SAR(0) @@ -1028,7 +1028,7 @@ #define SH7750_SAR3_A7 SH7750_SAR_A7(3) /* DMA Destination Address Register - DAR0, DAR1, DAR2, DAR3 */ -#define SH7750_DAR_REGOFS(n) (0xA00004 + ((n)*16)) /* offset */ +#define SH7750_DAR_REGOFS(n) (0xA00004 + ((n) * 16)) /* offset */ #define SH7750_DAR(n) SH7750_P4_REG32(SH7750_DAR_REGOFS(n)) #define SH7750_DAR_A7(n) SH7750_A7_REG32(SH7750_DAR_REGOFS(n)) #define SH7750_DAR0 SH7750_DAR(0) @@ -1041,7 +1041,7 @@ #define SH7750_DAR3_A7 SH7750_DAR_A7(3) /* DMA Transfer Count Register - DMATCR0, DMATCR1, DMATCR2, DMATCR3 */ -#define SH7750_DMATCR_REGOFS(n) (0xA00008 + ((n)*16)) /* offset */ +#define SH7750_DMATCR_REGOFS(n) (0xA00008 + ((n) * 16)) /* offset */ #define SH7750_DMATCR(n) SH7750_P4_REG32(SH7750_DMATCR_REGOFS(n)) #define SH7750_DMATCR_A7(n) SH7750_A7_REG32(SH7750_DMATCR_REGOFS(n)) #define SH7750_DMATCR0_P4 SH7750_DMATCR(0) @@ -1054,7 +1054,7 @@ #define SH7750_DMATCR3_A7 SH7750_DMATCR_A7(3) /* DMA Channel Control Register - CHCR0, CHCR1, CHCR2, CHCR3 */ -#define SH7750_CHCR_REGOFS(n) (0xA0000C + ((n)*16)) /* offset */ +#define SH7750_CHCR_REGOFS(n) (0xA0000C + ((n) * 16)) /* offset */ #define SH7750_CHCR(n) SH7750_P4_REG32(SH7750_CHCR_REGOFS(n)) #define SH7750_CHCR_A7(n) SH7750_A7_REG32(SH7750_CHCR_REGOFS(n)) #define SH7750_CHCR0 SH7750_CHCR(0) @@ -1066,227 +1066,227 @@ #define SH7750_CHCR2_A7 SH7750_CHCR_A7(2) #define SH7750_CHCR3_A7 SH7750_CHCR_A7(3) -#define SH7750_CHCR_SSA 0xE0000000 /* Source Address Space Attribute */ -#define SH7750_CHCR_SSA_PCMCIA 0x00000000 /* Reserved in PCMCIA access */ -#define SH7750_CHCR_SSA_DYNBSZ 0x20000000 /* Dynamic Bus Sizing I/O space */ -#define SH7750_CHCR_SSA_IO8 0x40000000 /* 8-bit I/O space */ -#define SH7750_CHCR_SSA_IO16 0x60000000 /* 16-bit I/O space */ -#define SH7750_CHCR_SSA_CMEM8 0x80000000 /* 8-bit common memory space */ -#define SH7750_CHCR_SSA_CMEM16 0xA0000000 /* 16-bit common memory space */ -#define SH7750_CHCR_SSA_AMEM8 0xC0000000 /* 8-bit attribute memory space */ -#define SH7750_CHCR_SSA_AMEM16 0xE0000000 /* 16-bit attribute memory space */ +#define SH7750_CHCR_SSA 0xE0000000 /* Source Address Space Attribute */ +#define SH7750_CHCR_SSA_PCMCIA 0x00000000 /* Reserved in PCMCIA access */ +#define SH7750_CHCR_SSA_DYNBSZ 0x20000000 /* Dynamic Bus Sizing I/O space */ +#define SH7750_CHCR_SSA_IO8 0x40000000 /* 8-bit I/O space */ +#define SH7750_CHCR_SSA_IO16 0x60000000 /* 16-bit I/O space */ +#define SH7750_CHCR_SSA_CMEM8 0x80000000 /* 8-bit common memory space */ +#define SH7750_CHCR_SSA_CMEM16 0xA0000000 /* 16-bit common memory space */ +#define SH7750_CHCR_SSA_AMEM8 0xC0000000 /* 8-bit attribute memory space */ +#define SH7750_CHCR_SSA_AMEM16 0xE0000000 /* 16-bit attribute memory space */ -#define SH7750_CHCR_STC 0x10000000 /* Source Address Wait Control Select, - specifies CS5 or CS6 space wait - control for PCMCIA access */ +#define SH7750_CHCR_STC 0x10000000 /* Source Addr Wait Control Select */ + /* specifies CS5 or CS6 space wait */ + /* control for PCMCIA access */ -#define SH7750_CHCR_DSA 0x0E000000 /* Source Address Space Attribute */ -#define SH7750_CHCR_DSA_PCMCIA 0x00000000 /* Reserved in PCMCIA access */ -#define SH7750_CHCR_DSA_DYNBSZ 0x02000000 /* Dynamic Bus Sizing I/O space */ -#define SH7750_CHCR_DSA_IO8 0x04000000 /* 8-bit I/O space */ -#define 
SH7750_CHCR_DSA_IO16 0x06000000 /* 16-bit I/O space */ -#define SH7750_CHCR_DSA_CMEM8 0x08000000 /* 8-bit common memory space */ -#define SH7750_CHCR_DSA_CMEM16 0x0A000000 /* 16-bit common memory space */ -#define SH7750_CHCR_DSA_AMEM8 0x0C000000 /* 8-bit attribute memory space */ -#define SH7750_CHCR_DSA_AMEM16 0x0E000000 /* 16-bit attribute memory space */ +#define SH7750_CHCR_DSA 0x0E000000 /* Source Address Space Attribute */ +#define SH7750_CHCR_DSA_PCMCIA 0x00000000 /* Reserved in PCMCIA access */ +#define SH7750_CHCR_DSA_DYNBSZ 0x02000000 /* Dynamic Bus Sizing I/O space */ +#define SH7750_CHCR_DSA_IO8 0x04000000 /* 8-bit I/O space */ +#define SH7750_CHCR_DSA_IO16 0x06000000 /* 16-bit I/O space */ +#define SH7750_CHCR_DSA_CMEM8 0x08000000 /* 8-bit common memory space */ +#define SH7750_CHCR_DSA_CMEM16 0x0A000000 /* 16-bit common memory space */ +#define SH7750_CHCR_DSA_AMEM8 0x0C000000 /* 8-bit attribute memory space */ +#define SH7750_CHCR_DSA_AMEM16 0x0E000000 /* 16-bit attribute memory space */ -#define SH7750_CHCR_DTC 0x01000000 /* Destination Address Wait Control - Select, specifies CS5 or CS6 - space wait control for PCMCIA - access */ +#define SH7750_CHCR_DTC 0x01000000 /* Destination Address Wait Control */ + /* Select, specifies CS5 or CS6 */ + /* space wait control for PCMCIA */ + /* access */ -#define SH7750_CHCR_DS 0x00080000 /* DREQ\ Select : */ -#define SH7750_CHCR_DS_LOWLVL 0x00000000 /* Low Level Detection */ -#define SH7750_CHCR_DS_FALL 0x00080000 /* Falling Edge Detection */ +#define SH7750_CHCR_DS 0x00080000 /* DREQ\ Select : */ +#define SH7750_CHCR_DS_LOWLVL 0x00000000 /* Low Level Detection */ +#define SH7750_CHCR_DS_FALL 0x00080000 /* Falling Edge Detection */ -#define SH7750_CHCR_RL 0x00040000 /* Request Check Level: */ -#define SH7750_CHCR_RL_ACTH 0x00000000 /* DRAK is an active high out */ -#define SH7750_CHCR_RL_ACTL 0x00040000 /* DRAK is an active low out */ +#define SH7750_CHCR_RL 0x00040000 /* Request Check Level: */ +#define SH7750_CHCR_RL_ACTH 0x00000000 /* DRAK is an active high out */ +#define SH7750_CHCR_RL_ACTL 0x00040000 /* DRAK is an active low out */ -#define SH7750_CHCR_AM 0x00020000 /* Acknowledge Mode: */ -#define SH7750_CHCR_AM_RD 0x00000000 /* DACK is output in read cycle */ -#define SH7750_CHCR_AM_WR 0x00020000 /* DACK is output in write cycle */ +#define SH7750_CHCR_AM 0x00020000 /* Acknowledge Mode: */ +#define SH7750_CHCR_AM_RD 0x00000000 /* DACK is output in read cycle */ +#define SH7750_CHCR_AM_WR 0x00020000 /* DACK is output in write cycle */ -#define SH7750_CHCR_AL 0x00010000 /* Acknowledge Level: */ -#define SH7750_CHCR_AL_ACTH 0x00000000 /* DACK is an active high out */ -#define SH7750_CHCR_AL_ACTL 0x00010000 /* DACK is an active low out */ +#define SH7750_CHCR_AL 0x00010000 /* Acknowledge Level: */ +#define SH7750_CHCR_AL_ACTH 0x00000000 /* DACK is an active high out */ +#define SH7750_CHCR_AL_ACTL 0x00010000 /* DACK is an active low out */ -#define SH7750_CHCR_DM 0x0000C000 /* Destination Address Mode: */ -#define SH7750_CHCR_DM_FIX 0x00000000 /* Destination Addr Fixed */ -#define SH7750_CHCR_DM_INC 0x00004000 /* Destination Addr Incremented */ -#define SH7750_CHCR_DM_DEC 0x00008000 /* Destination Addr Decremented */ +#define SH7750_CHCR_DM 0x0000C000 /* Destination Address Mode: */ +#define SH7750_CHCR_DM_FIX 0x00000000 /* Destination Addr Fixed */ +#define SH7750_CHCR_DM_INC 0x00004000 /* Destination Addr Incremented */ +#define SH7750_CHCR_DM_DEC 0x00008000 /* Destination Addr Decremented */ -#define SH7750_CHCR_SM 0x00003000 
/* Source Address Mode: */ -#define SH7750_CHCR_SM_FIX 0x00000000 /* Source Addr Fixed */ -#define SH7750_CHCR_SM_INC 0x00001000 /* Source Addr Incremented */ -#define SH7750_CHCR_SM_DEC 0x00002000 /* Source Addr Decremented */ +#define SH7750_CHCR_SM 0x00003000 /* Source Address Mode: */ +#define SH7750_CHCR_SM_FIX 0x00000000 /* Source Addr Fixed */ +#define SH7750_CHCR_SM_INC 0x00001000 /* Source Addr Incremented */ +#define SH7750_CHCR_SM_DEC 0x00002000 /* Source Addr Decremented */ -#define SH7750_CHCR_RS 0x00000F00 /* Request Source Select: */ -#define SH7750_CHCR_RS_ER_DA_EA_TO_EA 0x000 /* External Request, Dual Address - Mode (External Addr Space-> - External Addr Space) */ -#define SH7750_CHCR_RS_ER_SA_EA_TO_ED 0x200 /* External Request, Single - Address Mode (External Addr - Space -> External Device) */ -#define SH7750_CHCR_RS_ER_SA_ED_TO_EA 0x300 /* External Request, Single - Address Mode, (External - Device -> External Addr - Space) */ -#define SH7750_CHCR_RS_AR_EA_TO_EA 0x400 /* Auto-Request (External Addr - Space -> External Addr Space) */ +#define SH7750_CHCR_RS 0x00000F00 /* Request Source Select: */ +#define SH7750_CHCR_RS_ER_DA_EA_TO_EA 0x000 /* External Request, Dual Addr */ + /* Mode, External Addr Space */ + /* -> External Addr Space) */ +#define SH7750_CHCR_RS_ER_SA_EA_TO_ED 0x200 /* External Request, Single */ + /* Address Mode (Ext. Addr */ + /* Space -> External Device) */ +#define SH7750_CHCR_RS_ER_SA_ED_TO_EA 0x300 /* External Request, Single */ + /* Address Mode, (External */ + /* Device -> External Addr */ + /* Space) */ +#define SH7750_CHCR_RS_AR_EA_TO_EA 0x400 /* Auto-Request (External Addr */ + /* Space -> Ext. Addr Space) */ -#define SH7750_CHCR_RS_AR_EA_TO_OCP 0x500 /* Auto-Request (External Addr - Space -> On-chip Peripheral - Module) */ -#define SH7750_CHCR_RS_AR_OCP_TO_EA 0x600 /* Auto-Request (On-chip - Peripheral Module -> - External Addr Space */ -#define SH7750_CHCR_RS_SCITX_EA_TO_SC 0x800 /* SCI Transmit-Data-Empty intr - transfer request (external - address space -> SCTDR1) */ -#define SH7750_CHCR_RS_SCIRX_SC_TO_EA 0x900 /* SCI Receive-Data-Full intr - transfer request (SCRDR1 -> - External Addr Space) */ -#define SH7750_CHCR_RS_SCIFTX_EA_TO_SC 0xA00 /* SCIF Transmit-Data-Empty intr - transfer request (external - address space -> SCFTDR1) */ -#define SH7750_CHCR_RS_SCIFRX_SC_TO_EA 0xB00 /* SCIF Receive-Data-Full intr - transfer request (SCFRDR2 -> - External Addr Space) */ -#define SH7750_CHCR_RS_TMU2_EA_TO_EA 0xC00 /* TMU Channel 2 (input capture - interrupt), (external address - space -> external address - space) */ -#define SH7750_CHCR_RS_TMU2_EA_TO_OCP 0xD00 /* TMU Channel 2 (input capture - interrupt), (external address - space -> on-chip peripheral - module) */ -#define SH7750_CHCR_RS_TMU2_OCP_TO_EA 0xE00 /* TMU Channel 2 (input capture - interrupt), (on-chip - peripheral module -> external - address space) */ +#define SH7750_CHCR_RS_AR_EA_TO_OCP 0x500 /* Auto-Request (External Addr */ + /* Space -> On-chip */ + /* Peripheral Module) */ +#define SH7750_CHCR_RS_AR_OCP_TO_EA 0x600 /* Auto-Request (On-chip */ + /* Peripheral Module -> */ + /* External Addr Space */ +#define SH7750_CHCR_RS_SCITX_EA_TO_SC 0x800 /* SCI Transmit-Data-Empty intr */ + /* transfer request (external */ + /* address space -> SCTDR1) */ +#define SH7750_CHCR_RS_SCIRX_SC_TO_EA 0x900 /* SCI Receive-Data-Full intr */ + /* transfer request (SCRDR1 */ + /* -> External Addr Space) */ +#define SH7750_CHCR_RS_SCIFTX_EA_TO_SC 0xA00 /* SCIF TX-Data-Empty intr */ + /* transfer 
request (external */ + /* address space -> SCFTDR1) */ +#define SH7750_CHCR_RS_SCIFRX_SC_TO_EA 0xB00 /* SCIF Receive-Data-Full intr */ + /* transfer request (SCFRDR2 */ + /* -> External Addr Space) */ +#define SH7750_CHCR_RS_TMU2_EA_TO_EA 0xC00 /* TMU Channel 2 (input capture */ + /* interrupt), (external */ + /* address space -> external */ + /* address space) */ +#define SH7750_CHCR_RS_TMU2_EA_TO_OCP 0xD00 /* TMU Channel 2 (input capture */ + /* interrupt), (external */ + /* address space -> on-chip */ + /* peripheral module) */ +#define SH7750_CHCR_RS_TMU2_OCP_TO_EA 0xE00 /* TMU Channel 2 (input capture */ + /* interrupt), (on-chip */ + /* peripheral module -> */ + /* external address space) */ -#define SH7750_CHCR_TM 0x00000080 /* Transmit mode: */ -#define SH7750_CHCR_TM_CSTEAL 0x00000000 /* Cycle Steal Mode */ -#define SH7750_CHCR_TM_BURST 0x00000080 /* Burst Mode */ +#define SH7750_CHCR_TM 0x00000080 /* Transmit mode: */ +#define SH7750_CHCR_TM_CSTEAL 0x00000000 /* Cycle Steal Mode */ +#define SH7750_CHCR_TM_BURST 0x00000080 /* Burst Mode */ -#define SH7750_CHCR_TS 0x00000070 /* Transmit Size: */ -#define SH7750_CHCR_TS_QUAD 0x00000000 /* Quadword Size (64 bits) */ -#define SH7750_CHCR_TS_BYTE 0x00000010 /* Byte Size (8 bit) */ -#define SH7750_CHCR_TS_WORD 0x00000020 /* Word Size (16 bit) */ -#define SH7750_CHCR_TS_LONG 0x00000030 /* Longword Size (32 bit) */ -#define SH7750_CHCR_TS_BLOCK 0x00000040 /* 32-byte block transfer */ +#define SH7750_CHCR_TS 0x00000070 /* Transmit Size: */ +#define SH7750_CHCR_TS_QUAD 0x00000000 /* Quadword Size (64 bits) */ +#define SH7750_CHCR_TS_BYTE 0x00000010 /* Byte Size (8 bit) */ +#define SH7750_CHCR_TS_WORD 0x00000020 /* Word Size (16 bit) */ +#define SH7750_CHCR_TS_LONG 0x00000030 /* Longword Size (32 bit) */ +#define SH7750_CHCR_TS_BLOCK 0x00000040 /* 32-byte block transfer */ -#define SH7750_CHCR_IE 0x00000004 /* Interrupt Enable */ -#define SH7750_CHCR_TE 0x00000002 /* Transfer End */ -#define SH7750_CHCR_DE 0x00000001 /* DMAC Enable */ +#define SH7750_CHCR_IE 0x00000004 /* Interrupt Enable */ +#define SH7750_CHCR_TE 0x00000002 /* Transfer End */ +#define SH7750_CHCR_DE 0x00000001 /* DMAC Enable */ /* DMA Operation Register - DMAOR */ -#define SH7750_DMAOR_REGOFS 0xA00040 /* offset */ +#define SH7750_DMAOR_REGOFS 0xA00040 /* offset */ #define SH7750_DMAOR SH7750_P4_REG32(SH7750_DMAOR_REGOFS) #define SH7750_DMAOR_A7 SH7750_A7_REG32(SH7750_DMAOR_REGOFS) -#define SH7750_DMAOR_DDT 0x00008000 /* On-Demand Data Transfer Mode */ +#define SH7750_DMAOR_DDT 0x00008000 /* On-Demand Data Transfer Mode */ -#define SH7750_DMAOR_PR 0x00000300 /* Priority Mode: */ -#define SH7750_DMAOR_PR_0123 0x00000000 /* CH0 > CH1 > CH2 > CH3 */ -#define SH7750_DMAOR_PR_0231 0x00000100 /* CH0 > CH2 > CH3 > CH1 */ -#define SH7750_DMAOR_PR_2013 0x00000200 /* CH2 > CH0 > CH1 > CH3 */ -#define SH7750_DMAOR_PR_RR 0x00000300 /* Round-robin mode */ +#define SH7750_DMAOR_PR 0x00000300 /* Priority Mode: */ +#define SH7750_DMAOR_PR_0123 0x00000000 /* CH0 > CH1 > CH2 > CH3 */ +#define SH7750_DMAOR_PR_0231 0x00000100 /* CH0 > CH2 > CH3 > CH1 */ +#define SH7750_DMAOR_PR_2013 0x00000200 /* CH2 > CH0 > CH1 > CH3 */ +#define SH7750_DMAOR_PR_RR 0x00000300 /* Round-robin mode */ -#define SH7750_DMAOR_COD 0x00000010 /* Check Overrun for DREQ\ */ -#define SH7750_DMAOR_AE 0x00000004 /* Address Error flag */ -#define SH7750_DMAOR_NMIF 0x00000002 /* NMI Flag */ -#define SH7750_DMAOR_DME 0x00000001 /* DMAC Master Enable */ +#define SH7750_DMAOR_COD 0x00000010 /* Check Overrun for DREQ\ */ 
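For illustration only (this sketch is not part of the patch): the CHCR and DMAOR bit-field macros above are typically OR-ed together when guest-side code programs a DMA channel. A minimal sketch follows, assuming a trivial write32() MMIO helper and an auto-request, 32-bit, cycle-steal, memory-to-memory transfer; the helper and the particular transfer parameters are assumptions, not taken from this header or from QEMU.

#include <stdint.h>
/* Assumes the SH7750_* definitions from the header above are in scope. */

/* Assumed MMIO helper, not part of sh7750_regs.h. */
static inline void write32(uint32_t addr, uint32_t val)
{
    *(volatile uint32_t *)(uintptr_t)addr = val;
}

/* Hypothetical setup of DMA channel 0 using the macros defined above. */
static void dma0_start(uint32_t src, uint32_t dst, uint32_t count)
{
    write32(SH7750_SAR0, src);               /* source address          */
    write32(SH7750_DAR0, dst);               /* destination address     */
    write32(SH7750_DMATCR0_P4, count);       /* transfer count          */
    write32(SH7750_CHCR0,
            SH7750_CHCR_RS_AR_EA_TO_EA |     /* auto-request, mem->mem  */
            SH7750_CHCR_TS_LONG |            /* 32-bit transfer units   */
            SH7750_CHCR_TM_CSTEAL |          /* cycle-steal mode        */
            SH7750_CHCR_SM_INC |             /* increment source addr   */
            SH7750_CHCR_DM_INC |             /* increment dest addr     */
            SH7750_CHCR_IE |                 /* interrupt at end        */
            SH7750_CHCR_DE);                 /* enable the channel      */
    write32(SH7750_DMAOR, SH7750_DMAOR_DME); /* DMAC master enable      */
}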
+#define SH7750_DMAOR_AE 0x00000004 /* Address Error flag */ +#define SH7750_DMAOR_NMIF 0x00000002 /* NMI Flag */ +#define SH7750_DMAOR_DME 0x00000001 /* DMAC Master Enable */ /* * I/O Ports */ /* Port Control Register A - PCTRA */ -#define SH7750_PCTRA_REGOFS 0x80002C /* offset */ +#define SH7750_PCTRA_REGOFS 0x80002C /* offset */ #define SH7750_PCTRA SH7750_P4_REG32(SH7750_PCTRA_REGOFS) #define SH7750_PCTRA_A7 SH7750_A7_REG32(SH7750_PCTRA_REGOFS) -#define SH7750_PCTRA_PBPUP(n) 0 /* Bit n is pulled up */ -#define SH7750_PCTRA_PBNPUP(n) (1 << ((n)*2+1)) /* Bit n is not pulled up */ -#define SH7750_PCTRA_PBINP(n) 0 /* Bit n is an input */ -#define SH7750_PCTRA_PBOUT(n) (1 << ((n)*2)) /* Bit n is an output */ +#define SH7750_PCTRA_PBPUP(n) 0 /* Bit n is pulled up */ +#define SH7750_PCTRA_PBNPUP(n) (1 << ((n) * 2 + 1)) /* Bit n is not pulled up */ +#define SH7750_PCTRA_PBINP(n) 0 /* Bit n is an input */ +#define SH7750_PCTRA_PBOUT(n) (1 << ((n) * 2)) /* Bit n is an output */ /* Port Data Register A - PDTRA(half) */ -#define SH7750_PDTRA_REGOFS 0x800030 /* offset */ +#define SH7750_PDTRA_REGOFS 0x800030 /* offset */ #define SH7750_PDTRA SH7750_P4_REG32(SH7750_PDTRA_REGOFS) #define SH7750_PDTRA_A7 SH7750_A7_REG32(SH7750_PDTRA_REGOFS) #define SH7750_PDTRA_BIT(n) (1 << (n)) /* Port Control Register B - PCTRB */ -#define SH7750_PCTRB_REGOFS 0x800040 /* offset */ +#define SH7750_PCTRB_REGOFS 0x800040 /* offset */ #define SH7750_PCTRB SH7750_P4_REG32(SH7750_PCTRB_REGOFS) #define SH7750_PCTRB_A7 SH7750_A7_REG32(SH7750_PCTRB_REGOFS) -#define SH7750_PCTRB_PBPUP(n) 0 /* Bit n is pulled up */ -#define SH7750_PCTRB_PBNPUP(n) (1 << ((n-16)*2+1)) /* Bit n is not pulled up */ -#define SH7750_PCTRB_PBINP(n) 0 /* Bit n is an input */ -#define SH7750_PCTRB_PBOUT(n) (1 << ((n-16)*2)) /* Bit n is an output */ +#define SH7750_PCTRB_PBPUP(n) 0 /* Bit n is pulled up */ +#define SH7750_PCTRB_PBNPUP(n) (1 << ((n - 16) * 2 + 1)) /* Bit n is not pulled up */ +#define SH7750_PCTRB_PBINP(n) 0 /* Bit n is an input */ +#define SH7750_PCTRB_PBOUT(n) (1 << ((n - 16) * 2)) /* Bit n is an output */ /* Port Data Register B - PDTRB(half) */ -#define SH7750_PDTRB_REGOFS 0x800044 /* offset */ +#define SH7750_PDTRB_REGOFS 0x800044 /* offset */ #define SH7750_PDTRB SH7750_P4_REG32(SH7750_PDTRB_REGOFS) #define SH7750_PDTRB_A7 SH7750_A7_REG32(SH7750_PDTRB_REGOFS) -#define SH7750_PDTRB_BIT(n) (1 << ((n)-16)) +#define SH7750_PDTRB_BIT(n) (1 << ((n) - 16)) /* GPIO Interrupt Control Register - GPIOIC(half) */ -#define SH7750_GPIOIC_REGOFS 0x800048 /* offset */ +#define SH7750_GPIOIC_REGOFS 0x800048 /* offset */ #define SH7750_GPIOIC SH7750_P4_REG32(SH7750_GPIOIC_REGOFS) #define SH7750_GPIOIC_A7 SH7750_A7_REG32(SH7750_GPIOIC_REGOFS) -#define SH7750_GPIOIC_PTIREN(n) (1 << (n)) /* Port n is used as a GPIO int */ +#define SH7750_GPIOIC_PTIREN(n) (1 << (n)) /* Port n is used as a GPIO int */ /* * Interrupt Controller - INTC */ /* Interrupt Control Register - ICR (half) */ -#define SH7750_ICR_REGOFS 0xD00000 /* offset */ +#define SH7750_ICR_REGOFS 0xD00000 /* offset */ #define SH7750_ICR SH7750_P4_REG32(SH7750_ICR_REGOFS) #define SH7750_ICR_A7 SH7750_A7_REG32(SH7750_ICR_REGOFS) -#define SH7750_ICR_NMIL 0x8000 /* NMI Input Level */ -#define SH7750_ICR_MAI 0x4000 /* NMI Interrupt Mask */ +#define SH7750_ICR_NMIL 0x8000 /* NMI Input Level */ +#define SH7750_ICR_MAI 0x4000 /* NMI Interrupt Mask */ -#define SH7750_ICR_NMIB 0x0200 /* NMI Block Mode: */ -#define SH7750_ICR_NMIB_BLK 0x0000 /* NMI requests held pending while - SR.BL bit is set to 1 */ 
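For illustration only (not part of the patch): the PCTRA/PCTRB pin macros reflowed above reserve two control bits per pin, which is why they shift by (n) * 2, while PDTRA/PDTRB hold one data bit per pin. A hedged sketch of driving a port A pin from guest-side code; the MMIO accessors are assumptions made for the example, not part of this header.

#include <stdint.h>
/* Assumes the SH7750_* definitions from the header above are in scope. */

/* Assumed MMIO accessors, not part of sh7750_regs.h. */
static inline uint32_t read32(uint32_t a)  { return *(volatile uint32_t *)(uintptr_t)a; }
static inline void write32(uint32_t a, uint32_t v) { *(volatile uint32_t *)(uintptr_t)a = v; }
static inline uint16_t read16(uint32_t a)  { return *(volatile uint16_t *)(uintptr_t)a; }
static inline void write16(uint32_t a, uint16_t v) { *(volatile uint16_t *)(uintptr_t)a = v; }

/*
 * Hypothetical: make port A pin `pin` an output with its pull-up off,
 * then drive it high.  PCTRA uses 2 bits per pin; PDTRA is a 16-bit
 * data register with 1 bit per pin.
 */
static void porta_drive_high(unsigned int pin)
{
    uint32_t pctra = read32(SH7750_PCTRA);

    pctra |= SH7750_PCTRA_PBOUT(pin) |   /* bit (pin * 2): output          */
             SH7750_PCTRA_PBNPUP(pin);   /* bit (pin * 2 + 1): no pull-up  */
    write32(SH7750_PCTRA, pctra);

    write16(SH7750_PDTRA, read16(SH7750_PDTRA) | SH7750_PDTRA_BIT(pin));
}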
-#define SH7750_ICR_NMIB_NBLK 0x0200 /* NMI requests detected when SR.BL bit - set to 1 */ +#define SH7750_ICR_NMIB 0x0200 /* NMI Block Mode: */ +#define SH7750_ICR_NMIB_BLK 0x0000 /* NMI requests held pending while */ + /* SR.BL bit is set to 1 */ +#define SH7750_ICR_NMIB_NBLK 0x0200 /* NMI requests detected when SR.BL */ + /* bit set to 1 */ -#define SH7750_ICR_NMIE 0x0100 /* NMI Edge Select: */ -#define SH7750_ICR_NMIE_FALL 0x0000 /* Interrupt request detected on falling - edge of NMI input */ -#define SH7750_ICR_NMIE_RISE 0x0100 /* Interrupt request detected on rising - edge of NMI input */ +#define SH7750_ICR_NMIE 0x0100 /* NMI Edge Select: */ +#define SH7750_ICR_NMIE_FALL 0x0000 /* Interrupt request detected on */ + /* falling edge of NMI input */ +#define SH7750_ICR_NMIE_RISE 0x0100 /* Interrupt request detected on */ + /* rising edge of NMI input */ -#define SH7750_ICR_IRLM 0x0080 /* IRL Pin Mode: */ -#define SH7750_ICR_IRLM_ENC 0x0000 /* IRL\ pins used as a level-encoded - interrupt requests */ -#define SH7750_ICR_IRLM_RAW 0x0080 /* IRL\ pins used as a four independent - interrupt requests */ +#define SH7750_ICR_IRLM 0x0080 /* IRL Pin Mode: */ +#define SH7750_ICR_IRLM_ENC 0x0000 /* IRL\ pins used as a level-encoded */ + /* interrupt requests */ +#define SH7750_ICR_IRLM_RAW 0x0080 /* IRL\ pins used as a four */ + /* independent interrupt requests */ /* * User Break Controller registers */ -#define SH7750_BARA 0x200000 /* Break address regiser A */ -#define SH7750_BAMRA 0x200004 /* Break address mask regiser A */ -#define SH7750_BBRA 0x200008 /* Break bus cycle regiser A */ -#define SH7750_BARB 0x20000c /* Break address regiser B */ -#define SH7750_BAMRB 0x200010 /* Break address mask regiser B */ -#define SH7750_BBRB 0x200014 /* Break bus cycle regiser B */ -#define SH7750_BASRB 0x000018 /* Break ASID regiser B */ -#define SH7750_BDRB 0x200018 /* Break data regiser B */ -#define SH7750_BDMRB 0x20001c /* Break data mask regiser B */ -#define SH7750_BRCR 0x200020 /* Break control register */ +#define SH7750_BARA 0x200000 /* Break address regiser A */ +#define SH7750_BAMRA 0x200004 /* Break address mask regiser A */ +#define SH7750_BBRA 0x200008 /* Break bus cycle regiser A */ +#define SH7750_BARB 0x20000c /* Break address regiser B */ +#define SH7750_BAMRB 0x200010 /* Break address mask regiser B */ +#define SH7750_BBRB 0x200014 /* Break bus cycle regiser B */ +#define SH7750_BASRB 0x000018 /* Break ASID regiser B */ +#define SH7750_BDRB 0x200018 /* Break data regiser B */ +#define SH7750_BDMRB 0x20001c /* Break data mask regiser B */ +#define SH7750_BRCR 0x200020 /* Break control register */ -#define SH7750_BRCR_UDBE 0x0001 /* User break debug enable bit */ +#define SH7750_BRCR_UDBE 0x0001 /* User break debug enable bit */ /* * Missing in RTEMS, added for QEMU diff --git a/hw/sh4/shix.c b/hw/sh4/shix.c index b0579aa0f1..aa812512f0 100644 --- a/hw/sh4/shix.c +++ b/hw/sh4/shix.c @@ -22,11 +22,11 @@ * THE SOFTWARE. 
*/ /* - Shix 2.0 board by Alexis Polti, described at - https://web.archive.org/web/20070917001736/perso.enst.fr/~polti/realisations/shix20 - - More information in target/sh4/README.sh4 -*/ + * Shix 2.0 board by Alexis Polti, described at + * https://web.archive.org/web/20070917001736/perso.enst.fr/~polti/realisations/shix20 + * + * More information in target/sh4/README.sh4 + */ #include "qemu/osdep.h" #include "qapi/error.h" #include "cpu.h" @@ -48,7 +48,7 @@ static void shix_init(MachineState *machine) MemoryRegion *rom = g_new(MemoryRegion, 1); MemoryRegion *sdram = g_new(MemoryRegion, 2); const char *bios_name = machine->firmware ?: BIOS_FILENAME; - + cpu = SUPERH_CPU(cpu_create(machine->cpu_type)); /* Allocate memory space */ diff --git a/hw/sh4/trace-events b/hw/sh4/trace-events new file mode 100644 index 0000000000..4b61cd56c8 --- /dev/null +++ b/hw/sh4/trace-events @@ -0,0 +1,3 @@ +# sh7750.c +sh7750_porta(uint16_t prev, uint16_t cur, uint16_t pdtr, uint16_t pctr) "porta changed from 0x%04x to 0x%04x\npdtra=0x%04x, pctra=0x%08x" +sh7750_portb(uint16_t prev, uint16_t cur, uint16_t pdtr, uint16_t pctr) "portb changed from 0x%04x to 0x%04x\npdtrb=0x%04x, pctrb=0x%08x" diff --git a/hw/sh4/trace.h b/hw/sh4/trace.h new file mode 100644 index 0000000000..e2c13323b7 --- /dev/null +++ b/hw/sh4/trace.h @@ -0,0 +1 @@ +#include "trace/trace-hw_sh4.h" diff --git a/hw/ssi/aspeed_smc.c b/hw/ssi/aspeed_smc.c index 331a2c5446..ff154eb84f 100644 --- a/hw/ssi/aspeed_smc.c +++ b/hw/ssi/aspeed_smc.c @@ -124,6 +124,13 @@ /* SPI dummy cycle data */ #define R_DUMMY_DATA (0x54 / 4) +/* FMC_WDT2 Control/Status Register for Alternate Boot (AST2600) */ +#define R_FMC_WDT2_CTRL (0x64 / 4) +#define FMC_WDT2_CTRL_ALT_BOOT_MODE BIT(6) /* O: 2 chips 1: 1 chip */ +#define FMC_WDT2_CTRL_SINGLE_BOOT_MODE BIT(5) +#define FMC_WDT2_CTRL_BOOT_SOURCE BIT(4) /* O: primary 1: alternate */ +#define FMC_WDT2_CTRL_EN BIT(0) + /* DMA Control/Status Register */ #define R_DMA_CTRL (0x80 / 4) #define DMA_CTRL_REQUEST (1 << 31) @@ -162,11 +169,6 @@ #define ASPEED_SMC_R_SPI_MAX (0x20 / 4) #define ASPEED_SMC_R_SMC_MAX (0x20 / 4) -#define ASPEED_SOC_SMC_FLASH_BASE 0x10000000 -#define ASPEED_SOC_FMC_FLASH_BASE 0x20000000 -#define ASPEED_SOC_SPI_FLASH_BASE 0x30000000 -#define ASPEED_SOC_SPI2_FLASH_BASE 0x38000000 - /* * DMA DRAM addresses should be 4 bytes aligned and the valid address * range is 0x40000000 - 0x5FFFFFFF (AST2400) @@ -179,8 +181,8 @@ * 0: 4 bytes * 0x7FFFFF: 32M bytes */ -#define DMA_DRAM_ADDR(s, val) ((val) & (s)->ctrl->dma_dram_mask) -#define DMA_FLASH_ADDR(s, val) ((val) & (s)->ctrl->dma_flash_mask) +#define DMA_DRAM_ADDR(asc, val) ((val) & (asc)->dma_dram_mask) +#define DMA_FLASH_ADDR(asc, val) ((val) & (asc)->dma_flash_mask) #define DMA_LENGTH(val) ((val) & 0x01FFFFFC) /* Flash opcodes. */ @@ -194,332 +196,48 @@ * controller. These can be changed when board is initialized with the * Segment Address Registers. 
*/ -static const AspeedSegments aspeed_segments_legacy[] = { - { 0x10000000, 32 * 1024 * 1024 }, -}; - -static const AspeedSegments aspeed_segments_fmc[] = { - { 0x20000000, 64 * 1024 * 1024 }, /* start address is readonly */ - { 0x24000000, 32 * 1024 * 1024 }, - { 0x26000000, 32 * 1024 * 1024 }, - { 0x28000000, 32 * 1024 * 1024 }, - { 0x2A000000, 32 * 1024 * 1024 } -}; - -static const AspeedSegments aspeed_segments_spi[] = { - { 0x30000000, 64 * 1024 * 1024 }, -}; - -static const AspeedSegments aspeed_segments_ast2500_fmc[] = { - { 0x20000000, 128 * 1024 * 1024 }, /* start address is readonly */ - { 0x28000000, 32 * 1024 * 1024 }, - { 0x2A000000, 32 * 1024 * 1024 }, -}; - -static const AspeedSegments aspeed_segments_ast2500_spi1[] = { - { 0x30000000, 32 * 1024 * 1024 }, /* start address is readonly */ - { 0x32000000, 96 * 1024 * 1024 }, /* end address is readonly */ -}; - -static const AspeedSegments aspeed_segments_ast2500_spi2[] = { - { 0x38000000, 32 * 1024 * 1024 }, /* start address is readonly */ - { 0x3A000000, 96 * 1024 * 1024 }, /* end address is readonly */ -}; -static uint32_t aspeed_smc_segment_to_reg(const AspeedSMCState *s, - const AspeedSegments *seg); -static void aspeed_smc_reg_to_segment(const AspeedSMCState *s, uint32_t reg, - AspeedSegments *seg); -static void aspeed_smc_dma_ctrl(AspeedSMCState *s, uint32_t value); - -/* - * AST2600 definitions - */ -#define ASPEED26_SOC_FMC_FLASH_BASE 0x20000000 -#define ASPEED26_SOC_SPI_FLASH_BASE 0x30000000 -#define ASPEED26_SOC_SPI2_FLASH_BASE 0x50000000 - -static const AspeedSegments aspeed_segments_ast2600_fmc[] = { - { 0x0, 128 * MiB }, /* start address is readonly */ - { 128 * MiB, 128 * MiB }, /* default is disabled but needed for -kernel */ - { 0x0, 0 }, /* disabled */ -}; - -static const AspeedSegments aspeed_segments_ast2600_spi1[] = { - { 0x0, 128 * MiB }, /* start address is readonly */ - { 0x0, 0 }, /* disabled */ -}; - -static const AspeedSegments aspeed_segments_ast2600_spi2[] = { - { 0x0, 128 * MiB }, /* start address is readonly */ - { 0x0, 0 }, /* disabled */ - { 0x0, 0 }, /* disabled */ -}; - -static uint32_t aspeed_2600_smc_segment_to_reg(const AspeedSMCState *s, - const AspeedSegments *seg); -static void aspeed_2600_smc_reg_to_segment(const AspeedSMCState *s, - uint32_t reg, AspeedSegments *seg); -static void aspeed_2600_smc_dma_ctrl(AspeedSMCState *s, uint32_t value); +static const AspeedSegments aspeed_2500_spi1_segments[]; +static const AspeedSegments aspeed_2500_spi2_segments[]; #define ASPEED_SMC_FEATURE_DMA 0x1 #define ASPEED_SMC_FEATURE_DMA_GRANT 0x2 +#define ASPEED_SMC_FEATURE_WDT_CONTROL 0x4 -static inline bool aspeed_smc_has_dma(const AspeedSMCState *s) +static inline bool aspeed_smc_has_dma(const AspeedSMCClass *asc) { - return !!(s->ctrl->features & ASPEED_SMC_FEATURE_DMA); + return !!(asc->features & ASPEED_SMC_FEATURE_DMA); } -static const AspeedSMCController controllers[] = { - { - .name = "aspeed.smc-ast2400", - .r_conf = R_CONF, - .r_ce_ctrl = R_CE_CTRL, - .r_ctrl0 = R_CTRL0, - .r_timings = R_TIMINGS, - .nregs_timings = 1, - .conf_enable_w0 = CONF_ENABLE_W0, - .max_peripherals = 1, - .segments = aspeed_segments_legacy, - .flash_window_base = ASPEED_SOC_SMC_FLASH_BASE, - .flash_window_size = 0x6000000, - .features = 0x0, - .nregs = ASPEED_SMC_R_SMC_MAX, - .segment_to_reg = aspeed_smc_segment_to_reg, - .reg_to_segment = aspeed_smc_reg_to_segment, - .dma_ctrl = aspeed_smc_dma_ctrl, - }, { - .name = "aspeed.fmc-ast2400", - .r_conf = R_CONF, - .r_ce_ctrl = R_CE_CTRL, - .r_ctrl0 = R_CTRL0, - .r_timings 
= R_TIMINGS, - .nregs_timings = 1, - .conf_enable_w0 = CONF_ENABLE_W0, - .max_peripherals = 5, - .segments = aspeed_segments_fmc, - .flash_window_base = ASPEED_SOC_FMC_FLASH_BASE, - .flash_window_size = 0x10000000, - .features = ASPEED_SMC_FEATURE_DMA, - .dma_flash_mask = 0x0FFFFFFC, - .dma_dram_mask = 0x1FFFFFFC, - .nregs = ASPEED_SMC_R_MAX, - .segment_to_reg = aspeed_smc_segment_to_reg, - .reg_to_segment = aspeed_smc_reg_to_segment, - .dma_ctrl = aspeed_smc_dma_ctrl, - }, { - .name = "aspeed.spi1-ast2400", - .r_conf = R_SPI_CONF, - .r_ce_ctrl = 0xff, - .r_ctrl0 = R_SPI_CTRL0, - .r_timings = R_SPI_TIMINGS, - .nregs_timings = 1, - .conf_enable_w0 = SPI_CONF_ENABLE_W0, - .max_peripherals = 1, - .segments = aspeed_segments_spi, - .flash_window_base = ASPEED_SOC_SPI_FLASH_BASE, - .flash_window_size = 0x10000000, - .features = 0x0, - .nregs = ASPEED_SMC_R_SPI_MAX, - .segment_to_reg = aspeed_smc_segment_to_reg, - .reg_to_segment = aspeed_smc_reg_to_segment, - .dma_ctrl = aspeed_smc_dma_ctrl, - }, { - .name = "aspeed.fmc-ast2500", - .r_conf = R_CONF, - .r_ce_ctrl = R_CE_CTRL, - .r_ctrl0 = R_CTRL0, - .r_timings = R_TIMINGS, - .nregs_timings = 1, - .conf_enable_w0 = CONF_ENABLE_W0, - .max_peripherals = 3, - .segments = aspeed_segments_ast2500_fmc, - .flash_window_base = ASPEED_SOC_FMC_FLASH_BASE, - .flash_window_size = 0x10000000, - .features = ASPEED_SMC_FEATURE_DMA, - .dma_flash_mask = 0x0FFFFFFC, - .dma_dram_mask = 0x3FFFFFFC, - .nregs = ASPEED_SMC_R_MAX, - .segment_to_reg = aspeed_smc_segment_to_reg, - .reg_to_segment = aspeed_smc_reg_to_segment, - .dma_ctrl = aspeed_smc_dma_ctrl, - }, { - .name = "aspeed.spi1-ast2500", - .r_conf = R_CONF, - .r_ce_ctrl = R_CE_CTRL, - .r_ctrl0 = R_CTRL0, - .r_timings = R_TIMINGS, - .nregs_timings = 1, - .conf_enable_w0 = CONF_ENABLE_W0, - .max_peripherals = 2, - .segments = aspeed_segments_ast2500_spi1, - .flash_window_base = ASPEED_SOC_SPI_FLASH_BASE, - .flash_window_size = 0x8000000, - .features = 0x0, - .nregs = ASPEED_SMC_R_MAX, - .segment_to_reg = aspeed_smc_segment_to_reg, - .reg_to_segment = aspeed_smc_reg_to_segment, - .dma_ctrl = aspeed_smc_dma_ctrl, - }, { - .name = "aspeed.spi2-ast2500", - .r_conf = R_CONF, - .r_ce_ctrl = R_CE_CTRL, - .r_ctrl0 = R_CTRL0, - .r_timings = R_TIMINGS, - .nregs_timings = 1, - .conf_enable_w0 = CONF_ENABLE_W0, - .max_peripherals = 2, - .segments = aspeed_segments_ast2500_spi2, - .flash_window_base = ASPEED_SOC_SPI2_FLASH_BASE, - .flash_window_size = 0x8000000, - .features = 0x0, - .nregs = ASPEED_SMC_R_MAX, - .segment_to_reg = aspeed_smc_segment_to_reg, - .reg_to_segment = aspeed_smc_reg_to_segment, - .dma_ctrl = aspeed_smc_dma_ctrl, - }, { - .name = "aspeed.fmc-ast2600", - .r_conf = R_CONF, - .r_ce_ctrl = R_CE_CTRL, - .r_ctrl0 = R_CTRL0, - .r_timings = R_TIMINGS, - .nregs_timings = 1, - .conf_enable_w0 = CONF_ENABLE_W0, - .max_peripherals = 3, - .segments = aspeed_segments_ast2600_fmc, - .flash_window_base = ASPEED26_SOC_FMC_FLASH_BASE, - .flash_window_size = 0x10000000, - .features = ASPEED_SMC_FEATURE_DMA, - .dma_flash_mask = 0x0FFFFFFC, - .dma_dram_mask = 0x3FFFFFFC, - .nregs = ASPEED_SMC_R_MAX, - .segment_to_reg = aspeed_2600_smc_segment_to_reg, - .reg_to_segment = aspeed_2600_smc_reg_to_segment, - .dma_ctrl = aspeed_2600_smc_dma_ctrl, - }, { - .name = "aspeed.spi1-ast2600", - .r_conf = R_CONF, - .r_ce_ctrl = R_CE_CTRL, - .r_ctrl0 = R_CTRL0, - .r_timings = R_TIMINGS, - .nregs_timings = 2, - .conf_enable_w0 = CONF_ENABLE_W0, - .max_peripherals = 2, - .segments = aspeed_segments_ast2600_spi1, - .flash_window_base = 
ASPEED26_SOC_SPI_FLASH_BASE, - .flash_window_size = 0x10000000, - .features = ASPEED_SMC_FEATURE_DMA | - ASPEED_SMC_FEATURE_DMA_GRANT, - .dma_flash_mask = 0x0FFFFFFC, - .dma_dram_mask = 0x3FFFFFFC, - .nregs = ASPEED_SMC_R_MAX, - .segment_to_reg = aspeed_2600_smc_segment_to_reg, - .reg_to_segment = aspeed_2600_smc_reg_to_segment, - .dma_ctrl = aspeed_2600_smc_dma_ctrl, - }, { - .name = "aspeed.spi2-ast2600", - .r_conf = R_CONF, - .r_ce_ctrl = R_CE_CTRL, - .r_ctrl0 = R_CTRL0, - .r_timings = R_TIMINGS, - .nregs_timings = 3, - .conf_enable_w0 = CONF_ENABLE_W0, - .max_peripherals = 3, - .segments = aspeed_segments_ast2600_spi2, - .flash_window_base = ASPEED26_SOC_SPI2_FLASH_BASE, - .flash_window_size = 0x10000000, - .features = ASPEED_SMC_FEATURE_DMA | - ASPEED_SMC_FEATURE_DMA_GRANT, - .dma_flash_mask = 0x0FFFFFFC, - .dma_dram_mask = 0x3FFFFFFC, - .nregs = ASPEED_SMC_R_MAX, - .segment_to_reg = aspeed_2600_smc_segment_to_reg, - .reg_to_segment = aspeed_2600_smc_reg_to_segment, - .dma_ctrl = aspeed_2600_smc_dma_ctrl, - }, -}; - -/* - * The Segment Registers of the AST2400 and AST2500 have a 8MB - * unit. The address range of a flash SPI peripheral is encoded with - * absolute addresses which should be part of the overall controller - * window. - */ -static uint32_t aspeed_smc_segment_to_reg(const AspeedSMCState *s, - const AspeedSegments *seg) +static inline bool aspeed_smc_has_wdt_control(const AspeedSMCClass *asc) { - uint32_t reg = 0; - reg |= ((seg->addr >> 23) & SEG_START_MASK) << SEG_START_SHIFT; - reg |= (((seg->addr + seg->size) >> 23) & SEG_END_MASK) << SEG_END_SHIFT; - return reg; + return !!(asc->features & ASPEED_SMC_FEATURE_WDT_CONTROL); } -static void aspeed_smc_reg_to_segment(const AspeedSMCState *s, - uint32_t reg, AspeedSegments *seg) -{ - seg->addr = ((reg >> SEG_START_SHIFT) & SEG_START_MASK) << 23; - seg->size = (((reg >> SEG_END_SHIFT) & SEG_END_MASK) << 23) - seg->addr; -} - -/* - * The Segment Registers of the AST2600 have a 1MB unit. The address - * range of a flash SPI peripheral is encoded with offsets in the overall - * controller window. The previous SoC AST2400 and AST2500 used - * absolute addresses. Only bits [27:20] are relevant and the end - * address is an upper bound limit. - */ -#define AST2600_SEG_ADDR_MASK 0x0ff00000 - -static uint32_t aspeed_2600_smc_segment_to_reg(const AspeedSMCState *s, - const AspeedSegments *seg) -{ - uint32_t reg = 0; - - /* Disabled segments have a nil register */ - if (!seg->size) { - return 0; - } - - reg |= (seg->addr & AST2600_SEG_ADDR_MASK) >> 16; /* start offset */ - reg |= (seg->addr + seg->size - 1) & AST2600_SEG_ADDR_MASK; /* end offset */ - return reg; -} - -static void aspeed_2600_smc_reg_to_segment(const AspeedSMCState *s, - uint32_t reg, AspeedSegments *seg) -{ - uint32_t start_offset = (reg << 16) & AST2600_SEG_ADDR_MASK; - uint32_t end_offset = reg & AST2600_SEG_ADDR_MASK; - - if (reg) { - seg->addr = s->ctrl->flash_window_base + start_offset; - seg->size = end_offset + MiB - start_offset; - } else { - seg->addr = s->ctrl->flash_window_base; - seg->size = 0; - } -} +#define aspeed_smc_error(fmt, ...) 
\ + qemu_log_mask(LOG_GUEST_ERROR, "%s: " fmt "\n", __func__, ## __VA_ARGS__) static bool aspeed_smc_flash_overlap(const AspeedSMCState *s, const AspeedSegments *new, int cs) { + AspeedSMCClass *asc = ASPEED_SMC_GET_CLASS(s); AspeedSegments seg; int i; - for (i = 0; i < s->ctrl->max_peripherals; i++) { + for (i = 0; i < asc->max_peripherals; i++) { if (i == cs) { continue; } - s->ctrl->reg_to_segment(s, s->regs[R_SEG_ADDR0 + i], &seg); + asc->reg_to_segment(s, s->regs[R_SEG_ADDR0 + i], &seg); if (new->addr + new->size > seg.addr && new->addr < seg.addr + seg.size) { - qemu_log_mask(LOG_GUEST_ERROR, "%s: new segment CS%d [ 0x%" - HWADDR_PRIx" - 0x%"HWADDR_PRIx" ] overlaps with " - "CS%d [ 0x%"HWADDR_PRIx" - 0x%"HWADDR_PRIx" ]\n", - s->ctrl->name, cs, new->addr, new->addr + new->size, - i, seg.addr, seg.addr + seg.size); + aspeed_smc_error("new segment CS%d [ 0x%" + HWADDR_PRIx" - 0x%"HWADDR_PRIx" ] overlaps with " + "CS%d [ 0x%"HWADDR_PRIx" - 0x%"HWADDR_PRIx" ]", + cs, new->addr, new->addr + new->size, + i, seg.addr, seg.addr + seg.size); return true; } } @@ -529,14 +247,15 @@ static bool aspeed_smc_flash_overlap(const AspeedSMCState *s, static void aspeed_smc_flash_set_segment_region(AspeedSMCState *s, int cs, uint64_t regval) { + AspeedSMCClass *asc = ASPEED_SMC_GET_CLASS(s); AspeedSMCFlash *fl = &s->flashes[cs]; AspeedSegments seg; - s->ctrl->reg_to_segment(s, regval, &seg); + asc->reg_to_segment(s, regval, &seg); memory_region_transaction_begin(); memory_region_set_size(&fl->mmio, seg.size); - memory_region_set_address(&fl->mmio, seg.addr - s->ctrl->flash_window_base); + memory_region_set_address(&fl->mmio, seg.addr - asc->flash_window_base); memory_region_set_enabled(&fl->mmio, !!seg.size); memory_region_transaction_commit(); @@ -546,53 +265,52 @@ static void aspeed_smc_flash_set_segment_region(AspeedSMCState *s, int cs, static void aspeed_smc_flash_set_segment(AspeedSMCState *s, int cs, uint64_t new) { + AspeedSMCClass *asc = ASPEED_SMC_GET_CLASS(s); AspeedSegments seg; - s->ctrl->reg_to_segment(s, new, &seg); + asc->reg_to_segment(s, new, &seg); trace_aspeed_smc_flash_set_segment(cs, new, seg.addr, seg.addr + seg.size); /* The start address of CS0 is read-only */ - if (cs == 0 && seg.addr != s->ctrl->flash_window_base) { - qemu_log_mask(LOG_GUEST_ERROR, - "%s: Tried to change CS0 start address to 0x%" - HWADDR_PRIx "\n", s->ctrl->name, seg.addr); - seg.addr = s->ctrl->flash_window_base; - new = s->ctrl->segment_to_reg(s, &seg); + if (cs == 0 && seg.addr != asc->flash_window_base) { + aspeed_smc_error("Tried to change CS0 start address to 0x%" + HWADDR_PRIx, seg.addr); + seg.addr = asc->flash_window_base; + new = asc->segment_to_reg(s, &seg); } /* * The end address of the AST2500 spi controllers is also * read-only. 
*/ - if ((s->ctrl->segments == aspeed_segments_ast2500_spi1 || - s->ctrl->segments == aspeed_segments_ast2500_spi2) && - cs == s->ctrl->max_peripherals && - seg.addr + seg.size != s->ctrl->segments[cs].addr + - s->ctrl->segments[cs].size) { - qemu_log_mask(LOG_GUEST_ERROR, - "%s: Tried to change CS%d end address to 0x%" - HWADDR_PRIx "\n", s->ctrl->name, cs, seg.addr + seg.size); - seg.size = s->ctrl->segments[cs].addr + s->ctrl->segments[cs].size - + if ((asc->segments == aspeed_2500_spi1_segments || + asc->segments == aspeed_2500_spi2_segments) && + cs == asc->max_peripherals && + seg.addr + seg.size != asc->segments[cs].addr + + asc->segments[cs].size) { + aspeed_smc_error("Tried to change CS%d end address to 0x%" + HWADDR_PRIx, cs, seg.addr + seg.size); + seg.size = asc->segments[cs].addr + asc->segments[cs].size - seg.addr; - new = s->ctrl->segment_to_reg(s, &seg); + new = asc->segment_to_reg(s, &seg); } /* Keep the segment in the overall flash window */ if (seg.size && - (seg.addr + seg.size <= s->ctrl->flash_window_base || - seg.addr > s->ctrl->flash_window_base + s->ctrl->flash_window_size)) { - qemu_log_mask(LOG_GUEST_ERROR, "%s: new segment for CS%d is invalid : " - "[ 0x%"HWADDR_PRIx" - 0x%"HWADDR_PRIx" ]\n", - s->ctrl->name, cs, seg.addr, seg.addr + seg.size); + (seg.addr + seg.size <= asc->flash_window_base || + seg.addr > asc->flash_window_base + asc->flash_window_size)) { + aspeed_smc_error("new segment for CS%d is invalid : " + "[ 0x%"HWADDR_PRIx" - 0x%"HWADDR_PRIx" ]", + cs, seg.addr, seg.addr + seg.size); return; } /* Check start address vs. alignment */ if (seg.size && !QEMU_IS_ALIGNED(seg.addr, seg.size)) { - qemu_log_mask(LOG_GUEST_ERROR, "%s: new segment for CS%d is not " - "aligned : [ 0x%"HWADDR_PRIx" - 0x%"HWADDR_PRIx" ]\n", - s->ctrl->name, cs, seg.addr, seg.addr + seg.size); + aspeed_smc_error("new segment for CS%d is not " + "aligned : [ 0x%"HWADDR_PRIx" - 0x%"HWADDR_PRIx" ]", + cs, seg.addr, seg.addr + seg.size); } /* And segments should not overlap (in the specs) */ @@ -605,16 +323,15 @@ static void aspeed_smc_flash_set_segment(AspeedSMCState *s, int cs, static uint64_t aspeed_smc_flash_default_read(void *opaque, hwaddr addr, unsigned size) { - qemu_log_mask(LOG_GUEST_ERROR, "%s: To 0x%" HWADDR_PRIx " of size %u" - PRIx64 "\n", __func__, addr, size); + aspeed_smc_error("To 0x%" HWADDR_PRIx " of size %u" PRIx64, addr, size); return 0; } static void aspeed_smc_flash_default_write(void *opaque, hwaddr addr, uint64_t data, unsigned size) { - qemu_log_mask(LOG_GUEST_ERROR, "%s: To 0x%" HWADDR_PRIx " of size %u: 0x%" - PRIx64 "\n", __func__, addr, size, data); + aspeed_smc_error("To 0x%" HWADDR_PRIx " of size %u: 0x%" PRIx64, + addr, size, data); } static const MemoryRegionOps aspeed_smc_flash_default_ops = { @@ -631,20 +348,20 @@ static inline int aspeed_smc_flash_mode(const AspeedSMCFlash *fl) { const AspeedSMCState *s = fl->controller; - return s->regs[s->r_ctrl0 + fl->id] & CTRL_CMD_MODE_MASK; + return s->regs[s->r_ctrl0 + fl->cs] & CTRL_CMD_MODE_MASK; } static inline bool aspeed_smc_is_writable(const AspeedSMCFlash *fl) { const AspeedSMCState *s = fl->controller; - return s->regs[s->r_conf] & (1 << (s->conf_enable_w0 + fl->id)); + return s->regs[s->r_conf] & (1 << (s->conf_enable_w0 + fl->cs)); } static inline int aspeed_smc_flash_cmd(const AspeedSMCFlash *fl) { const AspeedSMCState *s = fl->controller; - int cmd = (s->regs[s->r_ctrl0 + fl->id] >> CTRL_CMD_SHIFT) & CTRL_CMD_MASK; + int cmd = (s->regs[s->r_ctrl0 + fl->cs] >> CTRL_CMD_SHIFT) & CTRL_CMD_MASK; /* * 
In read mode, the default SPI command is READ (0x3). In other @@ -657,21 +374,22 @@ static inline int aspeed_smc_flash_cmd(const AspeedSMCFlash *fl) } if (!cmd) { - qemu_log_mask(LOG_GUEST_ERROR, "%s: no command defined for mode %d\n", - __func__, aspeed_smc_flash_mode(fl)); + aspeed_smc_error("no command defined for mode %d", + aspeed_smc_flash_mode(fl)); } return cmd; } -static inline int aspeed_smc_flash_is_4byte(const AspeedSMCFlash *fl) +static inline int aspeed_smc_flash_addr_width(const AspeedSMCFlash *fl) { const AspeedSMCState *s = fl->controller; + AspeedSMCClass *asc = ASPEED_SMC_GET_CLASS(s); - if (s->ctrl->segments == aspeed_segments_spi) { - return s->regs[s->r_ctrl0] & CTRL_AST2400_SPI_4BYTE; + if (asc->addr_width) { + return asc->addr_width(s); } else { - return s->regs[s->r_ce_ctrl] & (1 << (CTRL_EXTENDED0 + fl->id)); + return s->regs[s->r_ce_ctrl] & (1 << (CTRL_EXTENDED0 + fl->cs)) ? 4 : 3; } } @@ -679,9 +397,9 @@ static void aspeed_smc_flash_do_select(AspeedSMCFlash *fl, bool unselect) { AspeedSMCState *s = fl->controller; - trace_aspeed_smc_flash_select(fl->id, unselect ? "un" : ""); + trace_aspeed_smc_flash_select(fl->cs, unselect ? "un" : ""); - qemu_set_irq(s->cs_lines[fl->id], unselect); + qemu_set_irq(s->cs_lines[fl->cs], unselect); } static void aspeed_smc_flash_select(AspeedSMCFlash *fl) @@ -698,15 +416,14 @@ static uint32_t aspeed_smc_check_segment_addr(const AspeedSMCFlash *fl, uint32_t addr) { const AspeedSMCState *s = fl->controller; + AspeedSMCClass *asc = ASPEED_SMC_GET_CLASS(s); AspeedSegments seg; - s->ctrl->reg_to_segment(s, s->regs[R_SEG_ADDR0 + fl->id], &seg); + asc->reg_to_segment(s, s->regs[R_SEG_ADDR0 + fl->cs], &seg); if ((addr % seg.size) != addr) { - qemu_log_mask(LOG_GUEST_ERROR, - "%s: invalid address 0x%08x for CS%d segment : " - "[ 0x%"HWADDR_PRIx" - 0x%"HWADDR_PRIx" ]\n", - s->ctrl->name, addr, fl->id, seg.addr, - seg.addr + seg.size); + aspeed_smc_error("invalid address 0x%08x for CS%d segment : " + "[ 0x%"HWADDR_PRIx" - 0x%"HWADDR_PRIx" ]", + addr, fl->cs, seg.addr, seg.addr + seg.size); addr %= seg.size; } @@ -716,7 +433,7 @@ static uint32_t aspeed_smc_check_segment_addr(const AspeedSMCFlash *fl, static int aspeed_smc_flash_dummies(const AspeedSMCFlash *fl) { const AspeedSMCState *s = fl->controller; - uint32_t r_ctrl0 = s->regs[s->r_ctrl0 + fl->id]; + uint32_t r_ctrl0 = s->regs[s->r_ctrl0 + fl->cs]; uint32_t dummy_high = (r_ctrl0 >> CTRL_DUMMY_HIGH_SHIFT) & 0x1; uint32_t dummy_low = (r_ctrl0 >> CTRL_DUMMY_LOW_SHIFT) & 0x3; uint32_t dummies = ((dummy_high << 2) | dummy_low) * 8; @@ -732,7 +449,7 @@ static void aspeed_smc_flash_setup(AspeedSMCFlash *fl, uint32_t addr) { const AspeedSMCState *s = fl->controller; uint8_t cmd = aspeed_smc_flash_cmd(fl); - int i = aspeed_smc_flash_is_4byte(fl) ? 
4 : 3; + int i = aspeed_smc_flash_addr_width(fl); /* Flash access can not exceed CS segment */ addr = aspeed_smc_check_segment_addr(fl, addr); @@ -782,11 +499,10 @@ static uint64_t aspeed_smc_flash_read(void *opaque, hwaddr addr, unsigned size) aspeed_smc_flash_unselect(fl); break; default: - qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid flash mode %d\n", - __func__, aspeed_smc_flash_mode(fl)); + aspeed_smc_error("invalid flash mode %d", aspeed_smc_flash_mode(fl)); } - trace_aspeed_smc_flash_read(fl->id, addr, size, ret, + trace_aspeed_smc_flash_read(fl->cs, addr, size, ret, aspeed_smc_flash_mode(fl)); return ret; } @@ -841,9 +557,9 @@ static bool aspeed_smc_do_snoop(AspeedSMCFlash *fl, uint64_t data, unsigned size) { AspeedSMCState *s = fl->controller; - uint8_t addr_width = aspeed_smc_flash_is_4byte(fl) ? 4 : 3; + uint8_t addr_width = aspeed_smc_flash_addr_width(fl); - trace_aspeed_smc_do_snoop(fl->id, s->snoop_index, s->snoop_dummies, + trace_aspeed_smc_do_snoop(fl->cs, s->snoop_index, s->snoop_dummies, (uint8_t) data & 0xff); if (s->snoop_index == SNOOP_OFF) { @@ -896,12 +612,11 @@ static void aspeed_smc_flash_write(void *opaque, hwaddr addr, uint64_t data, AspeedSMCState *s = fl->controller; int i; - trace_aspeed_smc_flash_write(fl->id, addr, size, data, + trace_aspeed_smc_flash_write(fl->cs, addr, size, data, aspeed_smc_flash_mode(fl)); if (!aspeed_smc_is_writable(fl)) { - qemu_log_mask(LOG_GUEST_ERROR, "%s: flash is not writable at 0x%" - HWADDR_PRIx "\n", __func__, addr); + aspeed_smc_error("flash is not writable at 0x%" HWADDR_PRIx, addr); return; } @@ -926,8 +641,7 @@ static void aspeed_smc_flash_write(void *opaque, hwaddr addr, uint64_t data, aspeed_smc_flash_unselect(fl); break; default: - qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid flash mode %d\n", - __func__, aspeed_smc_flash_mode(fl)); + aspeed_smc_error("invalid flash mode %d", aspeed_smc_flash_mode(fl)); } } @@ -950,12 +664,12 @@ static void aspeed_smc_flash_update_ctrl(AspeedSMCFlash *fl, uint32_t value) unselect = (value & CTRL_CMD_MODE_MASK) != CTRL_USERMODE; /* A change of CTRL_CE_STOP_ACTIVE from 0 to 1, unselects the CS */ - if (!(s->regs[s->r_ctrl0 + fl->id] & CTRL_CE_STOP_ACTIVE) && + if (!(s->regs[s->r_ctrl0 + fl->cs] & CTRL_CE_STOP_ACTIVE) && value & CTRL_CE_STOP_ACTIVE) { unselect = true; } - s->regs[s->r_ctrl0 + fl->id] = value; + s->regs[s->r_ctrl0 + fl->cs] = value; s->snoop_index = unselect ? 
SNOOP_OFF : SNOOP_START; @@ -965,9 +679,14 @@ static void aspeed_smc_flash_update_ctrl(AspeedSMCFlash *fl, uint32_t value) static void aspeed_smc_reset(DeviceState *d) { AspeedSMCState *s = ASPEED_SMC(d); + AspeedSMCClass *asc = ASPEED_SMC_GET_CLASS(s); int i; - memset(s->regs, 0, sizeof s->regs); + if (asc->resets) { + memcpy(s->regs, asc->resets, sizeof s->regs); + } else { + memset(s->regs, 0, sizeof s->regs); + } /* Unselect all peripherals */ for (i = 0; i < s->num_cs; ++i) { @@ -976,30 +695,9 @@ static void aspeed_smc_reset(DeviceState *d) } /* setup the default segment register values and regions for all */ - for (i = 0; i < s->ctrl->max_peripherals; ++i) { + for (i = 0; i < asc->max_peripherals; ++i) { aspeed_smc_flash_set_segment_region(s, i, - s->ctrl->segment_to_reg(s, &s->ctrl->segments[i])); - } - - /* HW strapping flash type for the AST2600 controllers */ - if (s->ctrl->segments == aspeed_segments_ast2600_fmc) { - /* flash type is fixed to SPI for all */ - s->regs[s->r_conf] |= (CONF_FLASH_TYPE_SPI << CONF_FLASH_TYPE0); - s->regs[s->r_conf] |= (CONF_FLASH_TYPE_SPI << CONF_FLASH_TYPE1); - s->regs[s->r_conf] |= (CONF_FLASH_TYPE_SPI << CONF_FLASH_TYPE2); - } - - /* HW strapping flash type for FMC controllers */ - if (s->ctrl->segments == aspeed_segments_ast2500_fmc) { - /* flash type is fixed to SPI for CE0 and CE1 */ - s->regs[s->r_conf] |= (CONF_FLASH_TYPE_SPI << CONF_FLASH_TYPE0); - s->regs[s->r_conf] |= (CONF_FLASH_TYPE_SPI << CONF_FLASH_TYPE1); - } - - /* HW strapping for AST2400 FMC controllers (SCU70). Let's use the - * configuration of the palmetto-bmc machine */ - if (s->ctrl->segments == aspeed_segments_fmc) { - s->regs[s->r_conf] |= (CONF_FLASH_TYPE_SPI << CONF_FLASH_TYPE0); + asc->segment_to_reg(s, &asc->segments[i])); } s->snoop_index = SNOOP_OFF; @@ -1009,26 +707,28 @@ static void aspeed_smc_reset(DeviceState *d) static uint64_t aspeed_smc_read(void *opaque, hwaddr addr, unsigned int size) { AspeedSMCState *s = ASPEED_SMC(opaque); + AspeedSMCClass *asc = ASPEED_SMC_GET_CLASS(opaque); addr >>= 2; if (addr == s->r_conf || (addr >= s->r_timings && - addr < s->r_timings + s->ctrl->nregs_timings) || + addr < s->r_timings + asc->nregs_timings) || addr == s->r_ce_ctrl || addr == R_CE_CMD_CTRL || addr == R_INTR_CTRL || addr == R_DUMMY_DATA || - (aspeed_smc_has_dma(s) && addr == R_DMA_CTRL) || - (aspeed_smc_has_dma(s) && addr == R_DMA_FLASH_ADDR) || - (aspeed_smc_has_dma(s) && addr == R_DMA_DRAM_ADDR) || - (aspeed_smc_has_dma(s) && addr == R_DMA_LEN) || - (aspeed_smc_has_dma(s) && addr == R_DMA_CHECKSUM) || + (aspeed_smc_has_wdt_control(asc) && addr == R_FMC_WDT2_CTRL) || + (aspeed_smc_has_dma(asc) && addr == R_DMA_CTRL) || + (aspeed_smc_has_dma(asc) && addr == R_DMA_FLASH_ADDR) || + (aspeed_smc_has_dma(asc) && addr == R_DMA_DRAM_ADDR) || + (aspeed_smc_has_dma(asc) && addr == R_DMA_LEN) || + (aspeed_smc_has_dma(asc) && addr == R_DMA_CHECKSUM) || (addr >= R_SEG_ADDR0 && - addr < R_SEG_ADDR0 + s->ctrl->max_peripherals) || - (addr >= s->r_ctrl0 && addr < s->r_ctrl0 + s->ctrl->max_peripherals)) { + addr < R_SEG_ADDR0 + asc->max_peripherals) || + (addr >= s->r_ctrl0 && addr < s->r_ctrl0 + asc->max_peripherals)) { - trace_aspeed_smc_read(addr, size, s->regs[addr]); + trace_aspeed_smc_read(addr << 2, size, s->regs[addr]); return s->regs[addr]; } else { @@ -1052,7 +752,7 @@ static uint8_t aspeed_smc_hclk_divisor(uint8_t hclk_mask) } } - qemu_log_mask(LOG_GUEST_ERROR, "invalid HCLK mask %x", hclk_mask); + aspeed_smc_error("invalid HCLK mask %x", hclk_mask); return 0; } @@ -1132,8 
+832,7 @@ static void aspeed_smc_dma_checksum(AspeedSMCState *s) uint32_t data; if (s->regs[R_DMA_CTRL] & DMA_CTRL_WRITE) { - qemu_log_mask(LOG_GUEST_ERROR, - "%s: invalid direction for DMA checksum\n", __func__); + aspeed_smc_error("invalid direction for DMA checksum"); return; } @@ -1145,8 +844,8 @@ static void aspeed_smc_dma_checksum(AspeedSMCState *s) data = address_space_ldl_le(&s->flash_as, s->regs[R_DMA_FLASH_ADDR], MEMTXATTRS_UNSPECIFIED, &result); if (result != MEMTX_OK) { - qemu_log_mask(LOG_GUEST_ERROR, "%s: Flash read failed @%08x\n", - __func__, s->regs[R_DMA_FLASH_ADDR]); + aspeed_smc_error("Flash read failed @%08x", + s->regs[R_DMA_FLASH_ADDR]); return; } trace_aspeed_smc_dma_checksum(s->regs[R_DMA_FLASH_ADDR], data); @@ -1181,32 +880,32 @@ static void aspeed_smc_dma_rw(AspeedSMCState *s) data = address_space_ldl_le(&s->dram_as, s->regs[R_DMA_DRAM_ADDR], MEMTXATTRS_UNSPECIFIED, &result); if (result != MEMTX_OK) { - qemu_log_mask(LOG_GUEST_ERROR, "%s: DRAM read failed @%08x\n", - __func__, s->regs[R_DMA_DRAM_ADDR]); + aspeed_smc_error("DRAM read failed @%08x", + s->regs[R_DMA_DRAM_ADDR]); return; } address_space_stl_le(&s->flash_as, s->regs[R_DMA_FLASH_ADDR], data, MEMTXATTRS_UNSPECIFIED, &result); if (result != MEMTX_OK) { - qemu_log_mask(LOG_GUEST_ERROR, "%s: Flash write failed @%08x\n", - __func__, s->regs[R_DMA_FLASH_ADDR]); + aspeed_smc_error("Flash write failed @%08x", + s->regs[R_DMA_FLASH_ADDR]); return; } } else { data = address_space_ldl_le(&s->flash_as, s->regs[R_DMA_FLASH_ADDR], MEMTXATTRS_UNSPECIFIED, &result); if (result != MEMTX_OK) { - qemu_log_mask(LOG_GUEST_ERROR, "%s: Flash read failed @%08x\n", - __func__, s->regs[R_DMA_FLASH_ADDR]); + aspeed_smc_error("Flash read failed @%08x", + s->regs[R_DMA_FLASH_ADDR]); return; } address_space_stl_le(&s->dram_as, s->regs[R_DMA_DRAM_ADDR], data, MEMTXATTRS_UNSPECIFIED, &result); if (result != MEMTX_OK) { - qemu_log_mask(LOG_GUEST_ERROR, "%s: DRAM write failed @%08x\n", - __func__, s->regs[R_DMA_DRAM_ADDR]); + aspeed_smc_error("DRAM write failed @%08x", + s->regs[R_DMA_DRAM_ADDR]); return; } } @@ -1266,7 +965,7 @@ static void aspeed_smc_dma_ctrl(AspeedSMCState *s, uint32_t dma_ctrl) } if (aspeed_smc_dma_in_progress(s)) { - qemu_log_mask(LOG_GUEST_ERROR, "%s: DMA in progress\n", __func__); + aspeed_smc_error("DMA in progress !"); return; } @@ -1283,12 +982,14 @@ static void aspeed_smc_dma_ctrl(AspeedSMCState *s, uint32_t dma_ctrl) static inline bool aspeed_smc_dma_granted(AspeedSMCState *s) { - if (!(s->ctrl->features & ASPEED_SMC_FEATURE_DMA_GRANT)) { + AspeedSMCClass *asc = ASPEED_SMC_GET_CLASS(s); + + if (!(asc->features & ASPEED_SMC_FEATURE_DMA_GRANT)) { return true; } if (!(s->regs[R_DMA_CTRL] & DMA_CTRL_GRANT)) { - qemu_log_mask(LOG_GUEST_ERROR, "%s: DMA not granted\n", __func__); + aspeed_smc_error("DMA not granted"); return false; } @@ -1313,7 +1014,7 @@ static void aspeed_2600_smc_dma_ctrl(AspeedSMCState *s, uint32_t dma_ctrl) } if (!aspeed_smc_dma_granted(s)) { - qemu_log_mask(LOG_GUEST_ERROR, "%s: DMA not granted\n", __func__); + aspeed_smc_error("DMA not granted"); return; } @@ -1325,22 +1026,23 @@ static void aspeed_smc_write(void *opaque, hwaddr addr, uint64_t data, unsigned int size) { AspeedSMCState *s = ASPEED_SMC(opaque); + AspeedSMCClass *asc = ASPEED_SMC_GET_CLASS(s); uint32_t value = data; - addr >>= 2; - trace_aspeed_smc_write(addr, size, data); + addr >>= 2; + if (addr == s->r_conf || (addr >= s->r_timings && - addr < s->r_timings + s->ctrl->nregs_timings) || + addr < s->r_timings + 
asc->nregs_timings) || addr == s->r_ce_ctrl) { s->regs[addr] = value; } else if (addr >= s->r_ctrl0 && addr < s->r_ctrl0 + s->num_cs) { int cs = addr - s->r_ctrl0; aspeed_smc_flash_update_ctrl(&s->flashes[cs], value); } else if (addr >= R_SEG_ADDR0 && - addr < R_SEG_ADDR0 + s->ctrl->max_peripherals) { + addr < R_SEG_ADDR0 + asc->max_peripherals) { int cs = addr - R_SEG_ADDR0; if (value != s->regs[R_SEG_ADDR0 + cs]) { @@ -1350,17 +1052,19 @@ static void aspeed_smc_write(void *opaque, hwaddr addr, uint64_t data, s->regs[addr] = value & 0xff; } else if (addr == R_DUMMY_DATA) { s->regs[addr] = value & 0xff; + } else if (aspeed_smc_has_wdt_control(asc) && addr == R_FMC_WDT2_CTRL) { + s->regs[addr] = value & FMC_WDT2_CTRL_EN; } else if (addr == R_INTR_CTRL) { s->regs[addr] = value; - } else if (aspeed_smc_has_dma(s) && addr == R_DMA_CTRL) { - s->ctrl->dma_ctrl(s, value); - } else if (aspeed_smc_has_dma(s) && addr == R_DMA_DRAM_ADDR && + } else if (aspeed_smc_has_dma(asc) && addr == R_DMA_CTRL) { + asc->dma_ctrl(s, value); + } else if (aspeed_smc_has_dma(asc) && addr == R_DMA_DRAM_ADDR && aspeed_smc_dma_granted(s)) { - s->regs[addr] = DMA_DRAM_ADDR(s, value); - } else if (aspeed_smc_has_dma(s) && addr == R_DMA_FLASH_ADDR && + s->regs[addr] = DMA_DRAM_ADDR(asc, value); + } else if (aspeed_smc_has_dma(asc) && addr == R_DMA_FLASH_ADDR && aspeed_smc_dma_granted(s)) { - s->regs[addr] = DMA_FLASH_ADDR(s, value); - } else if (aspeed_smc_has_dma(s) && addr == R_DMA_LEN && + s->regs[addr] = DMA_FLASH_ADDR(asc, value); + } else if (aspeed_smc_has_dma(asc) && addr == R_DMA_LEN && aspeed_smc_dma_granted(s)) { s->regs[addr] = DMA_LENGTH(value); } else { @@ -1376,50 +1080,53 @@ static const MemoryRegionOps aspeed_smc_ops = { .endianness = DEVICE_LITTLE_ENDIAN, }; +static void aspeed_smc_instance_init(Object *obj) +{ + AspeedSMCState *s = ASPEED_SMC(obj); + AspeedSMCClass *asc = ASPEED_SMC_GET_CLASS(s); + int i; + + for (i = 0; i < asc->max_peripherals; i++) { + object_initialize_child(obj, "flash[*]", &s->flashes[i], + TYPE_ASPEED_SMC_FLASH); + } +} + /* * Initialize the custom address spaces for DMAs */ static void aspeed_smc_dma_setup(AspeedSMCState *s, Error **errp) { - char *name; - if (!s->dram_mr) { error_setg(errp, TYPE_ASPEED_SMC ": 'dram' link not set"); return; } - name = g_strdup_printf("%s-dma-flash", s->ctrl->name); - address_space_init(&s->flash_as, &s->mmio_flash, name); - g_free(name); - - name = g_strdup_printf("%s-dma-dram", s->ctrl->name); - address_space_init(&s->dram_as, s->dram_mr, name); - g_free(name); + address_space_init(&s->flash_as, &s->mmio_flash, + TYPE_ASPEED_SMC ".dma-flash"); + address_space_init(&s->dram_as, s->dram_mr, + TYPE_ASPEED_SMC ".dma-dram"); } static void aspeed_smc_realize(DeviceState *dev, Error **errp) { SysBusDevice *sbd = SYS_BUS_DEVICE(dev); AspeedSMCState *s = ASPEED_SMC(dev); - AspeedSMCClass *mc = ASPEED_SMC_GET_CLASS(s); + AspeedSMCClass *asc = ASPEED_SMC_GET_CLASS(s); int i; - char name[32]; hwaddr offset = 0; - s->ctrl = mc->ctrl; - /* keep a copy under AspeedSMCState to speed up accesses */ - s->r_conf = s->ctrl->r_conf; - s->r_ce_ctrl = s->ctrl->r_ce_ctrl; - s->r_ctrl0 = s->ctrl->r_ctrl0; - s->r_timings = s->ctrl->r_timings; - s->conf_enable_w0 = s->ctrl->conf_enable_w0; + s->r_conf = asc->r_conf; + s->r_ce_ctrl = asc->r_ce_ctrl; + s->r_ctrl0 = asc->r_ctrl0; + s->r_timings = asc->r_timings; + s->conf_enable_w0 = asc->conf_enable_w0; /* Enforce some real HW limits */ - if (s->num_cs > s->ctrl->max_peripherals) { - qemu_log_mask(LOG_GUEST_ERROR, "%s: 
num_cs cannot exceed: %d\n", - __func__, s->ctrl->max_peripherals); - s->num_cs = s->ctrl->max_peripherals; + if (s->num_cs > asc->max_peripherals) { + aspeed_smc_error("num_cs cannot exceed: %d", asc->max_peripherals); + s->num_cs = asc->max_peripherals; } /* DMA irq. Keep it first for the initialization in the SoC */ @@ -1436,7 +1143,7 @@ static void aspeed_smc_realize(DeviceState *dev, Error **errp) /* The memory region for the controller registers */ memory_region_init_io(&s->mmio, OBJECT(s), &aspeed_smc_ops, s, - s->ctrl->name, s->ctrl->nregs * 4); + TYPE_ASPEED_SMC, asc->nregs * 4); sysbus_init_mmio(sbd, &s->mmio); /* @@ -1444,16 +1151,17 @@ static void aspeed_smc_realize(DeviceState *dev, Error **errp) * window in which the flash modules are mapped. The size and * address depends on the SoC model and controller type. */ - snprintf(name, sizeof(name), "%s.flash", s->ctrl->name); + memory_region_init(&s->mmio_flash_container, OBJECT(s), + TYPE_ASPEED_SMC ".container", + asc->flash_window_size); + sysbus_init_mmio(sbd, &s->mmio_flash_container); memory_region_init_io(&s->mmio_flash, OBJECT(s), - &aspeed_smc_flash_default_ops, s, name, - s->ctrl->flash_window_size); - memory_region_init_alias(&s->mmio_flash_alias, OBJECT(s), name, - &s->mmio_flash, 0, s->ctrl->flash_window_size); - sysbus_init_mmio(sbd, &s->mmio_flash_alias); - - s->flashes = g_new0(AspeedSMCFlash, s->ctrl->max_peripherals); + &aspeed_smc_flash_default_ops, s, + TYPE_ASPEED_SMC ".flash", + asc->flash_window_size); + memory_region_add_subregion(&s->mmio_flash_container, 0x0, + &s->mmio_flash); /* * Let's create a sub memory region for each possible peripheral. All @@ -1462,22 +1170,26 @@ static void aspeed_smc_realize(DeviceState *dev, Error **errp) * module behind to handle the memory accesses. This depends on * the board configuration. 
*/ - for (i = 0; i < s->ctrl->max_peripherals; ++i) { + for (i = 0; i < asc->max_peripherals; ++i) { AspeedSMCFlash *fl = &s->flashes[i]; - snprintf(name, sizeof(name), "%s.%d", s->ctrl->name, i); + if (!object_property_set_link(OBJECT(fl), "controller", OBJECT(s), + errp)) { + return; + } + if (!object_property_set_uint(OBJECT(fl), "cs", i, errp)) { + return; + } + if (!sysbus_realize(SYS_BUS_DEVICE(fl), errp)) { + return; + } - fl->id = i; - fl->controller = s; - fl->size = s->ctrl->segments[i].size; - memory_region_init_io(&fl->mmio, OBJECT(s), &aspeed_smc_flash_ops, - fl, name, fl->size); memory_region_add_subregion(&s->mmio_flash, offset, &fl->mmio); - offset += fl->size; + offset += asc->segments[i].size; } /* DMA support */ - if (aspeed_smc_has_dma(s)) { + if (aspeed_smc_has_dma(asc)) { aspeed_smc_dma_setup(s, errp); } } @@ -1505,37 +1217,494 @@ static Property aspeed_smc_properties[] = { static void aspeed_smc_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); - AspeedSMCClass *mc = ASPEED_SMC_CLASS(klass); dc->realize = aspeed_smc_realize; dc->reset = aspeed_smc_reset; device_class_set_props(dc, aspeed_smc_properties); dc->vmsd = &vmstate_aspeed_smc; - mc->ctrl = data; } static const TypeInfo aspeed_smc_info = { .name = TYPE_ASPEED_SMC, .parent = TYPE_SYS_BUS_DEVICE, + .instance_init = aspeed_smc_instance_init, .instance_size = sizeof(AspeedSMCState), .class_size = sizeof(AspeedSMCClass), + .class_init = aspeed_smc_class_init, .abstract = true, }; +static void aspeed_smc_flash_realize(DeviceState *dev, Error **errp) +{ + AspeedSMCFlash *s = ASPEED_SMC_FLASH(dev); + AspeedSMCClass *asc; + g_autofree char *name = g_strdup_printf(TYPE_ASPEED_SMC_FLASH ".%d", s->cs); + + if (!s->controller) { + error_setg(errp, TYPE_ASPEED_SMC_FLASH ": 'controller' link not set"); + return; + } + + asc = ASPEED_SMC_GET_CLASS(s->controller); + + /* + * Use the default segment value to size the memory region. This + * can be changed by FW at runtime. + */ + memory_region_init_io(&s->mmio, OBJECT(s), &aspeed_smc_flash_ops, + s, name, asc->segments[s->cs].size); + sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->mmio); +} + +static Property aspeed_smc_flash_properties[] = { + DEFINE_PROP_UINT8("cs", AspeedSMCFlash, cs, 0), + DEFINE_PROP_LINK("controller", AspeedSMCFlash, controller, TYPE_ASPEED_SMC, + AspeedSMCState *), + DEFINE_PROP_END_OF_LIST(), +}; + +static void aspeed_smc_flash_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->desc = "Aspeed SMC Flash device region"; + dc->realize = aspeed_smc_flash_realize; + device_class_set_props(dc, aspeed_smc_flash_properties); +} + +static const TypeInfo aspeed_smc_flash_info = { + .name = TYPE_ASPEED_SMC_FLASH, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(AspeedSMCFlash), + .class_init = aspeed_smc_flash_class_init, +}; + +/* + * The Segment Registers of the AST2400 and AST2500 have a 8MB + * unit. The address range of a flash SPI peripheral is encoded with + * absolute addresses which should be part of the overall controller + * window. 
+ */ +static uint32_t aspeed_smc_segment_to_reg(const AspeedSMCState *s, + const AspeedSegments *seg) +{ + uint32_t reg = 0; + reg |= ((seg->addr >> 23) & SEG_START_MASK) << SEG_START_SHIFT; + reg |= (((seg->addr + seg->size) >> 23) & SEG_END_MASK) << SEG_END_SHIFT; + return reg; +} + +static void aspeed_smc_reg_to_segment(const AspeedSMCState *s, + uint32_t reg, AspeedSegments *seg) +{ + seg->addr = ((reg >> SEG_START_SHIFT) & SEG_START_MASK) << 23; + seg->size = (((reg >> SEG_END_SHIFT) & SEG_END_MASK) << 23) - seg->addr; +} + +static const AspeedSegments aspeed_2400_smc_segments[] = { + { 0x10000000, 32 * MiB }, +}; + +static void aspeed_2400_smc_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + AspeedSMCClass *asc = ASPEED_SMC_CLASS(klass); + + dc->desc = "Aspeed 2400 SMC Controller"; + asc->r_conf = R_CONF; + asc->r_ce_ctrl = R_CE_CTRL; + asc->r_ctrl0 = R_CTRL0; + asc->r_timings = R_TIMINGS; + asc->nregs_timings = 1; + asc->conf_enable_w0 = CONF_ENABLE_W0; + asc->max_peripherals = 1; + asc->segments = aspeed_2400_smc_segments; + asc->flash_window_base = 0x10000000; + asc->flash_window_size = 0x6000000; + asc->features = 0x0; + asc->nregs = ASPEED_SMC_R_SMC_MAX; + asc->segment_to_reg = aspeed_smc_segment_to_reg; + asc->reg_to_segment = aspeed_smc_reg_to_segment; + asc->dma_ctrl = aspeed_smc_dma_ctrl; +} + +static const TypeInfo aspeed_2400_smc_info = { + .name = "aspeed.smc-ast2400", + .parent = TYPE_ASPEED_SMC, + .class_init = aspeed_2400_smc_class_init, +}; + +static const uint32_t aspeed_2400_fmc_resets[ASPEED_SMC_R_MAX] = { + /* + * CE0 and CE1 types are HW strapped in SCU70. Do it here to + * simplify the model. + */ + [R_CONF] = CONF_FLASH_TYPE_SPI << CONF_FLASH_TYPE0, +}; + +static const AspeedSegments aspeed_2400_fmc_segments[] = { + { 0x20000000, 64 * MiB }, /* start address is readonly */ + { 0x24000000, 32 * MiB }, + { 0x26000000, 32 * MiB }, + { 0x28000000, 32 * MiB }, + { 0x2A000000, 32 * MiB } +}; + +static void aspeed_2400_fmc_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + AspeedSMCClass *asc = ASPEED_SMC_CLASS(klass); + + dc->desc = "Aspeed 2400 FMC Controller"; + asc->r_conf = R_CONF; + asc->r_ce_ctrl = R_CE_CTRL; + asc->r_ctrl0 = R_CTRL0; + asc->r_timings = R_TIMINGS; + asc->nregs_timings = 1; + asc->conf_enable_w0 = CONF_ENABLE_W0; + asc->max_peripherals = 5; + asc->segments = aspeed_2400_fmc_segments; + asc->resets = aspeed_2400_fmc_resets; + asc->flash_window_base = 0x20000000; + asc->flash_window_size = 0x10000000; + asc->features = ASPEED_SMC_FEATURE_DMA; + asc->dma_flash_mask = 0x0FFFFFFC; + asc->dma_dram_mask = 0x1FFFFFFC; + asc->nregs = ASPEED_SMC_R_MAX; + asc->segment_to_reg = aspeed_smc_segment_to_reg; + asc->reg_to_segment = aspeed_smc_reg_to_segment; + asc->dma_ctrl = aspeed_smc_dma_ctrl; +} + +static const TypeInfo aspeed_2400_fmc_info = { + .name = "aspeed.fmc-ast2400", + .parent = TYPE_ASPEED_SMC, + .class_init = aspeed_2400_fmc_class_init, +}; + +static const AspeedSegments aspeed_2400_spi1_segments[] = { + { 0x30000000, 64 * MiB }, +}; + +static int aspeed_2400_spi1_addr_width(const AspeedSMCState *s) +{ + return s->regs[R_SPI_CTRL0] & CTRL_AST2400_SPI_4BYTE ? 
4 : 3; +} + +static void aspeed_2400_spi1_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + AspeedSMCClass *asc = ASPEED_SMC_CLASS(klass); + + dc->desc = "Aspeed 2400 SPI1 Controller"; + asc->r_conf = R_SPI_CONF; + asc->r_ce_ctrl = 0xff; + asc->r_ctrl0 = R_SPI_CTRL0; + asc->r_timings = R_SPI_TIMINGS; + asc->nregs_timings = 1; + asc->conf_enable_w0 = SPI_CONF_ENABLE_W0; + asc->max_peripherals = 1; + asc->segments = aspeed_2400_spi1_segments; + asc->flash_window_base = 0x30000000; + asc->flash_window_size = 0x10000000; + asc->features = 0x0; + asc->nregs = ASPEED_SMC_R_SPI_MAX; + asc->segment_to_reg = aspeed_smc_segment_to_reg; + asc->reg_to_segment = aspeed_smc_reg_to_segment; + asc->dma_ctrl = aspeed_smc_dma_ctrl; + asc->addr_width = aspeed_2400_spi1_addr_width; +} + +static const TypeInfo aspeed_2400_spi1_info = { + .name = "aspeed.spi1-ast2400", + .parent = TYPE_ASPEED_SMC, + .class_init = aspeed_2400_spi1_class_init, +}; + +static const uint32_t aspeed_2500_fmc_resets[ASPEED_SMC_R_MAX] = { + [R_CONF] = (CONF_FLASH_TYPE_SPI << CONF_FLASH_TYPE0 | + CONF_FLASH_TYPE_SPI << CONF_FLASH_TYPE1), +}; + +static const AspeedSegments aspeed_2500_fmc_segments[] = { + { 0x20000000, 128 * MiB }, /* start address is readonly */ + { 0x28000000, 32 * MiB }, + { 0x2A000000, 32 * MiB }, +}; + +static void aspeed_2500_fmc_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + AspeedSMCClass *asc = ASPEED_SMC_CLASS(klass); + + dc->desc = "Aspeed 2600 FMC Controller"; + asc->r_conf = R_CONF; + asc->r_ce_ctrl = R_CE_CTRL; + asc->r_ctrl0 = R_CTRL0; + asc->r_timings = R_TIMINGS; + asc->nregs_timings = 1; + asc->conf_enable_w0 = CONF_ENABLE_W0; + asc->max_peripherals = 3; + asc->segments = aspeed_2500_fmc_segments; + asc->resets = aspeed_2500_fmc_resets; + asc->flash_window_base = 0x20000000; + asc->flash_window_size = 0x10000000; + asc->features = ASPEED_SMC_FEATURE_DMA; + asc->dma_flash_mask = 0x0FFFFFFC; + asc->dma_dram_mask = 0x3FFFFFFC; + asc->nregs = ASPEED_SMC_R_MAX; + asc->segment_to_reg = aspeed_smc_segment_to_reg; + asc->reg_to_segment = aspeed_smc_reg_to_segment; + asc->dma_ctrl = aspeed_smc_dma_ctrl; +} + +static const TypeInfo aspeed_2500_fmc_info = { + .name = "aspeed.fmc-ast2500", + .parent = TYPE_ASPEED_SMC, + .class_init = aspeed_2500_fmc_class_init, +}; + +static const AspeedSegments aspeed_2500_spi1_segments[] = { + { 0x30000000, 32 * MiB }, /* start address is readonly */ + { 0x32000000, 96 * MiB }, /* end address is readonly */ +}; + +static void aspeed_2500_spi1_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + AspeedSMCClass *asc = ASPEED_SMC_CLASS(klass); + + dc->desc = "Aspeed 2600 SPI1 Controller"; + asc->r_conf = R_CONF; + asc->r_ce_ctrl = R_CE_CTRL; + asc->r_ctrl0 = R_CTRL0; + asc->r_timings = R_TIMINGS; + asc->nregs_timings = 1; + asc->conf_enable_w0 = CONF_ENABLE_W0; + asc->max_peripherals = 2; + asc->segments = aspeed_2500_spi1_segments; + asc->flash_window_base = 0x30000000; + asc->flash_window_size = 0x8000000; + asc->features = 0x0; + asc->nregs = ASPEED_SMC_R_MAX; + asc->segment_to_reg = aspeed_smc_segment_to_reg; + asc->reg_to_segment = aspeed_smc_reg_to_segment; + asc->dma_ctrl = aspeed_smc_dma_ctrl; +} + +static const TypeInfo aspeed_2500_spi1_info = { + .name = "aspeed.spi1-ast2500", + .parent = TYPE_ASPEED_SMC, + .class_init = aspeed_2500_spi1_class_init, +}; + +static const AspeedSegments aspeed_2500_spi2_segments[] = { + { 0x38000000, 32 * MiB }, /* start 
address is readonly */ + { 0x3A000000, 96 * MiB }, /* end address is readonly */ +}; + +static void aspeed_2500_spi2_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + AspeedSMCClass *asc = ASPEED_SMC_CLASS(klass); + + dc->desc = "Aspeed 2600 SPI2 Controller"; + asc->r_conf = R_CONF; + asc->r_ce_ctrl = R_CE_CTRL; + asc->r_ctrl0 = R_CTRL0; + asc->r_timings = R_TIMINGS; + asc->nregs_timings = 1; + asc->conf_enable_w0 = CONF_ENABLE_W0; + asc->max_peripherals = 2; + asc->segments = aspeed_2500_spi2_segments; + asc->flash_window_base = 0x38000000; + asc->flash_window_size = 0x8000000; + asc->features = 0x0; + asc->nregs = ASPEED_SMC_R_MAX; + asc->segment_to_reg = aspeed_smc_segment_to_reg; + asc->reg_to_segment = aspeed_smc_reg_to_segment; + asc->dma_ctrl = aspeed_smc_dma_ctrl; +} + +static const TypeInfo aspeed_2500_spi2_info = { + .name = "aspeed.spi2-ast2500", + .parent = TYPE_ASPEED_SMC, + .class_init = aspeed_2500_spi2_class_init, +}; + +/* + * The Segment Registers of the AST2600 have a 1MB unit. The address + * range of a flash SPI peripheral is encoded with offsets in the overall + * controller window. The previous SoC AST2400 and AST2500 used + * absolute addresses. Only bits [27:20] are relevant and the end + * address is an upper bound limit. + */ +#define AST2600_SEG_ADDR_MASK 0x0ff00000 + +static uint32_t aspeed_2600_smc_segment_to_reg(const AspeedSMCState *s, + const AspeedSegments *seg) +{ + uint32_t reg = 0; + + /* Disabled segments have a nil register */ + if (!seg->size) { + return 0; + } + + reg |= (seg->addr & AST2600_SEG_ADDR_MASK) >> 16; /* start offset */ + reg |= (seg->addr + seg->size - 1) & AST2600_SEG_ADDR_MASK; /* end offset */ + return reg; +} + +static void aspeed_2600_smc_reg_to_segment(const AspeedSMCState *s, + uint32_t reg, AspeedSegments *seg) +{ + uint32_t start_offset = (reg << 16) & AST2600_SEG_ADDR_MASK; + uint32_t end_offset = reg & AST2600_SEG_ADDR_MASK; + AspeedSMCClass *asc = ASPEED_SMC_GET_CLASS(s); + + if (reg) { + seg->addr = asc->flash_window_base + start_offset; + seg->size = end_offset + MiB - start_offset; + } else { + seg->addr = asc->flash_window_base; + seg->size = 0; + } +} + +static const uint32_t aspeed_2600_fmc_resets[ASPEED_SMC_R_MAX] = { + [R_CONF] = (CONF_FLASH_TYPE_SPI << CONF_FLASH_TYPE0 | + CONF_FLASH_TYPE_SPI << CONF_FLASH_TYPE1 | + CONF_FLASH_TYPE_SPI << CONF_FLASH_TYPE2), +}; + +static const AspeedSegments aspeed_2600_fmc_segments[] = { + { 0x0, 128 * MiB }, /* start address is readonly */ + { 128 * MiB, 128 * MiB }, /* default is disabled but needed for -kernel */ + { 0x0, 0 }, /* disabled */ +}; + +static void aspeed_2600_fmc_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + AspeedSMCClass *asc = ASPEED_SMC_CLASS(klass); + + dc->desc = "Aspeed 2600 FMC Controller"; + asc->r_conf = R_CONF; + asc->r_ce_ctrl = R_CE_CTRL; + asc->r_ctrl0 = R_CTRL0; + asc->r_timings = R_TIMINGS; + asc->nregs_timings = 1; + asc->conf_enable_w0 = CONF_ENABLE_W0; + asc->max_peripherals = 3; + asc->segments = aspeed_2600_fmc_segments; + asc->resets = aspeed_2600_fmc_resets; + asc->flash_window_base = 0x20000000; + asc->flash_window_size = 0x10000000; + asc->features = ASPEED_SMC_FEATURE_DMA | + ASPEED_SMC_FEATURE_WDT_CONTROL; + asc->dma_flash_mask = 0x0FFFFFFC; + asc->dma_dram_mask = 0x3FFFFFFC; + asc->nregs = ASPEED_SMC_R_MAX; + asc->segment_to_reg = aspeed_2600_smc_segment_to_reg; + asc->reg_to_segment = aspeed_2600_smc_reg_to_segment; + asc->dma_ctrl = 
aspeed_2600_smc_dma_ctrl; +} + +static const TypeInfo aspeed_2600_fmc_info = { + .name = "aspeed.fmc-ast2600", + .parent = TYPE_ASPEED_SMC, + .class_init = aspeed_2600_fmc_class_init, +}; + +static const AspeedSegments aspeed_2600_spi1_segments[] = { + { 0x0, 128 * MiB }, /* start address is readonly */ + { 0x0, 0 }, /* disabled */ +}; + +static void aspeed_2600_spi1_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + AspeedSMCClass *asc = ASPEED_SMC_CLASS(klass); + + dc->desc = "Aspeed 2600 SPI1 Controller"; + asc->r_conf = R_CONF; + asc->r_ce_ctrl = R_CE_CTRL; + asc->r_ctrl0 = R_CTRL0; + asc->r_timings = R_TIMINGS; + asc->nregs_timings = 2; + asc->conf_enable_w0 = CONF_ENABLE_W0; + asc->max_peripherals = 2; + asc->segments = aspeed_2600_spi1_segments; + asc->flash_window_base = 0x30000000; + asc->flash_window_size = 0x10000000; + asc->features = ASPEED_SMC_FEATURE_DMA | + ASPEED_SMC_FEATURE_DMA_GRANT; + asc->dma_flash_mask = 0x0FFFFFFC; + asc->dma_dram_mask = 0x3FFFFFFC; + asc->nregs = ASPEED_SMC_R_MAX; + asc->segment_to_reg = aspeed_2600_smc_segment_to_reg; + asc->reg_to_segment = aspeed_2600_smc_reg_to_segment; + asc->dma_ctrl = aspeed_2600_smc_dma_ctrl; +} + +static const TypeInfo aspeed_2600_spi1_info = { + .name = "aspeed.spi1-ast2600", + .parent = TYPE_ASPEED_SMC, + .class_init = aspeed_2600_spi1_class_init, +}; + +static const AspeedSegments aspeed_2600_spi2_segments[] = { + { 0x0, 128 * MiB }, /* start address is readonly */ + { 0x0, 0 }, /* disabled */ + { 0x0, 0 }, /* disabled */ +}; + +static void aspeed_2600_spi2_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + AspeedSMCClass *asc = ASPEED_SMC_CLASS(klass); + + dc->desc = "Aspeed 2600 SPI2 Controller"; + asc->r_conf = R_CONF; + asc->r_ce_ctrl = R_CE_CTRL; + asc->r_ctrl0 = R_CTRL0; + asc->r_timings = R_TIMINGS; + asc->nregs_timings = 3; + asc->conf_enable_w0 = CONF_ENABLE_W0; + asc->max_peripherals = 3; + asc->segments = aspeed_2600_spi2_segments; + asc->flash_window_base = 0x50000000; + asc->flash_window_size = 0x10000000; + asc->features = ASPEED_SMC_FEATURE_DMA | + ASPEED_SMC_FEATURE_DMA_GRANT; + asc->dma_flash_mask = 0x0FFFFFFC; + asc->dma_dram_mask = 0x3FFFFFFC; + asc->nregs = ASPEED_SMC_R_MAX; + asc->segment_to_reg = aspeed_2600_smc_segment_to_reg; + asc->reg_to_segment = aspeed_2600_smc_reg_to_segment; + asc->dma_ctrl = aspeed_2600_smc_dma_ctrl; +} + +static const TypeInfo aspeed_2600_spi2_info = { + .name = "aspeed.spi2-ast2600", + .parent = TYPE_ASPEED_SMC, + .class_init = aspeed_2600_spi2_class_init, +}; + static void aspeed_smc_register_types(void) { - int i; - + type_register_static(&aspeed_smc_flash_info); type_register_static(&aspeed_smc_info); - for (i = 0; i < ARRAY_SIZE(controllers); ++i) { - TypeInfo ti = { - .name = controllers[i].name, - .parent = TYPE_ASPEED_SMC, - .class_init = aspeed_smc_class_init, - .class_data = (void *)&controllers[i], - }; - type_register(&ti); - } + type_register_static(&aspeed_2400_smc_info); + type_register_static(&aspeed_2400_fmc_info); + type_register_static(&aspeed_2400_spi1_info); + type_register_static(&aspeed_2500_fmc_info); + type_register_static(&aspeed_2500_spi1_info); + type_register_static(&aspeed_2500_spi2_info); + type_register_static(&aspeed_2600_fmc_info); + type_register_static(&aspeed_2600_spi1_info); + type_register_static(&aspeed_2600_spi2_info); } type_init(aspeed_smc_register_types) diff --git a/hw/ssi/ssi.c b/hw/ssi/ssi.c index e5d7ce9523..003931fb50 100644 --- a/hw/ssi/ssi.c +++ 
b/hw/ssi/ssi.c @@ -107,7 +107,7 @@ DeviceState *ssi_create_peripheral(SSIBus *bus, const char *name) SSIBus *ssi_create_bus(DeviceState *parent, const char *name) { BusState *bus; - bus = qbus_create(TYPE_SSI_BUS, parent, name); + bus = qbus_new(TYPE_SSI_BUS, parent, name); return SSI_BUS(bus); } diff --git a/hw/timer/sh_timer.c b/hw/timer/sh_timer.c index 58af1a1edb..c72c327bfa 100644 --- a/hw/timer/sh_timer.c +++ b/hw/timer/sh_timer.c @@ -10,13 +10,12 @@ #include "qemu/osdep.h" #include "exec/memory.h" -#include "hw/hw.h" +#include "qemu/log.h" #include "hw/irq.h" #include "hw/sh4/sh.h" #include "hw/timer/tmu012.h" #include "hw/ptimer.h" - -//#define DEBUG_TIMER +#include "trace.h" #define TIMER_TCR_TPSC (7 << 0) #define TIMER_TCR_CKEG (3 << 3) @@ -46,24 +45,24 @@ typedef struct { int feat; int enabled; qemu_irq irq; -} sh_timer_state; +} SHTimerState; /* Check all active timers, and schedule the next timer interrupt. */ -static void sh_timer_update(sh_timer_state *s) +static void sh_timer_update(SHTimerState *s) { int new_level = s->int_level && (s->tcr & TIMER_TCR_UNIE); - if (new_level != s->old_level) - qemu_set_irq (s->irq, new_level); - + if (new_level != s->old_level) { + qemu_set_irq(s->irq, new_level); + } s->old_level = s->int_level; s->int_level = new_level; } static uint32_t sh_timer_read(void *opaque, hwaddr offset) { - sh_timer_state *s = (sh_timer_state *)opaque; + SHTimerState *s = opaque; switch (offset >> 2) { case OFFSET_TCOR: @@ -73,19 +72,18 @@ static uint32_t sh_timer_read(void *opaque, hwaddr offset) case OFFSET_TCR: return s->tcr | (s->int_level ? TIMER_TCR_UNF : 0); case OFFSET_TCPR: - if (s->feat & TIMER_FEAT_CAPT) + if (s->feat & TIMER_FEAT_CAPT) { return s->tcpr; - /* fall through */ - default: - hw_error("sh_timer_read: Bad offset %x\n", (int)offset); - return 0; + } } + qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%" HWADDR_PRIx "\n", + __func__, offset); + return 0; } -static void sh_timer_write(void *opaque, hwaddr offset, - uint32_t value) +static void sh_timer_write(void *opaque, hwaddr offset, uint32_t value) { - sh_timer_state *s = (sh_timer_state *)opaque; + SHTimerState *s = opaque; int freq; switch (offset >> 2) { @@ -104,19 +102,30 @@ static void sh_timer_write(void *opaque, hwaddr offset, case OFFSET_TCR: ptimer_transaction_begin(s->timer); if (s->enabled) { - /* Pause the timer if it is running. This may cause some - inaccuracy dure to rounding, but avoids a whole lot of other - messyness. */ + /* + * Pause the timer if it is running. This may cause some inaccuracy + * due to rounding, but avoids a whole lot of other messiness + */ ptimer_stop(s->timer); } freq = s->freq; /* ??? Need to recalculate expiry time after changing divisor. 
*/ switch (value & TIMER_TCR_TPSC) { - case 0: freq >>= 2; break; - case 1: freq >>= 4; break; - case 2: freq >>= 6; break; - case 3: freq >>= 8; break; - case 4: freq >>= 10; break; + case 0: + freq >>= 2; + break; + case 1: + freq >>= 4; + break; + case 2: + freq >>= 6; + break; + case 3: + freq >>= 8; + break; + case 4: + freq >>= 10; + break; case 6: case 7: if (s->feat & TIMER_FEAT_EXTCLK) { @@ -124,7 +133,8 @@ static void sh_timer_write(void *opaque, hwaddr offset, } /* fallthrough */ default: - hw_error("sh_timer_write: Reserved TPSC value\n"); + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Reserved TPSC value\n", __func__); } switch ((value & TIMER_TCR_CKEG) >> 3) { case 0: @@ -137,7 +147,8 @@ static void sh_timer_write(void *opaque, hwaddr offset, } /* fallthrough */ default: - hw_error("sh_timer_write: Reserved CKEG value\n"); + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Reserved CKEG value\n", __func__); } switch ((value & TIMER_TCR_ICPE) >> 6) { case 0: @@ -149,7 +160,8 @@ static void sh_timer_write(void *opaque, hwaddr offset, } /* fallthrough */ default: - hw_error("sh_timer_write: Reserved ICPE value\n"); + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Reserved ICPE value\n", __func__); } if ((value & TIMER_TCR_UNF) == 0) { s->int_level = 0; @@ -158,13 +170,15 @@ static void sh_timer_write(void *opaque, hwaddr offset, value &= ~TIMER_TCR_UNF; if ((value & TIMER_TCR_ICPF) && (!(s->feat & TIMER_FEAT_CAPT))) { - hw_error("sh_timer_write: Reserved ICPF value\n"); + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Reserved ICPF value\n", __func__); } value &= ~TIMER_TCR_ICPF; /* capture not supported */ if (value & TIMER_TCR_RESERVED) { - hw_error("sh_timer_write: Reserved TCR bits set\n"); + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Reserved TCR bits set\n", __func__); } s->tcr = value; ptimer_set_limit(s->timer, s->tcor, 0); @@ -182,19 +196,17 @@ static void sh_timer_write(void *opaque, hwaddr offset, } /* fallthrough */ default: - hw_error("sh_timer_write: Bad offset %x\n", (int)offset); + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Bad offset 0x%" HWADDR_PRIx "\n", __func__, offset); } sh_timer_update(s); } static void sh_timer_start_stop(void *opaque, int enable) { - sh_timer_state *s = (sh_timer_state *)opaque; - -#ifdef DEBUG_TIMER - printf("sh_timer_start_stop %d (%d)\n", enable, s->enabled); -#endif + SHTimerState *s = opaque; + trace_sh_timer_start_stop(enable, s->enabled); ptimer_transaction_begin(s->timer); if (s->enabled && !enable) { ptimer_stop(s->timer); @@ -204,24 +216,20 @@ static void sh_timer_start_stop(void *opaque, int enable) } ptimer_transaction_commit(s->timer); s->enabled = !!enable; - -#ifdef DEBUG_TIMER - printf("sh_timer_start_stop done %d\n", s->enabled); -#endif } static void sh_timer_tick(void *opaque) { - sh_timer_state *s = (sh_timer_state *)opaque; + SHTimerState *s = opaque; s->int_level = s->enabled; sh_timer_update(s); } static void *sh_timer_init(uint32_t freq, int feat, qemu_irq irq) { - sh_timer_state *s; + SHTimerState *s; - s = (sh_timer_state *)g_malloc0(sizeof(sh_timer_state)); + s = g_malloc0(sizeof(*s)); s->freq = freq; s->feat = feat; s->tcor = 0xffffffff; @@ -252,50 +260,49 @@ typedef struct { int feat; } tmu012_state; -static uint64_t tmu012_read(void *opaque, hwaddr offset, - unsigned size) +static uint64_t tmu012_read(void *opaque, hwaddr offset, unsigned size) { - tmu012_state *s = (tmu012_state *)opaque; - -#ifdef DEBUG_TIMER - printf("tmu012_read 0x%lx\n", (unsigned long) offset); -#endif + tmu012_state *s = opaque; + trace_sh_timer_read(offset); if (offset >= 0x20) 
{ if (!(s->feat & TMU012_FEAT_3CHAN)) { - hw_error("tmu012_write: Bad channel offset %x\n", (int)offset); + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Bad channel offset 0x%" HWADDR_PRIx "\n", + __func__, offset); } return sh_timer_read(s->timer[2], offset - 0x20); } - if (offset >= 0x14) + if (offset >= 0x14) { return sh_timer_read(s->timer[1], offset - 0x14); - - if (offset >= 0x08) + } + if (offset >= 0x08) { return sh_timer_read(s->timer[0], offset - 0x08); - - if (offset == 4) + } + if (offset == 4) { return s->tstr; - - if ((s->feat & TMU012_FEAT_TOCR) && offset == 0) + } + if ((s->feat & TMU012_FEAT_TOCR) && offset == 0) { return s->tocr; + } - hw_error("tmu012_write: Bad offset %x\n", (int)offset); + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Bad offset 0x%" HWADDR_PRIx "\n", __func__, offset); return 0; } static void tmu012_write(void *opaque, hwaddr offset, uint64_t value, unsigned size) { - tmu012_state *s = (tmu012_state *)opaque; - -#ifdef DEBUG_TIMER - printf("tmu012_write 0x%lx 0x%08x\n", (unsigned long) offset, value); -#endif + tmu012_state *s = opaque; + trace_sh_timer_write(offset, value); if (offset >= 0x20) { if (!(s->feat & TMU012_FEAT_3CHAN)) { - hw_error("tmu012_write: Bad channel offset %x\n", (int)offset); + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Bad channel offset 0x%" HWADDR_PRIx "\n", + __func__, offset); } sh_timer_write(s->timer[2], offset - 0x20, value); return; @@ -318,7 +325,7 @@ static void tmu012_write(void *opaque, hwaddr offset, sh_timer_start_stop(s->timer[2], value & (1 << 2)); } else { if (value & (1 << 2)) { - hw_error("tmu012_write: Bad channel\n"); + qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad channel\n", __func__); } } @@ -337,15 +344,14 @@ static const MemoryRegionOps tmu012_ops = { .endianness = DEVICE_NATIVE_ENDIAN, }; -void tmu012_init(MemoryRegion *sysmem, hwaddr base, - int feat, uint32_t freq, +void tmu012_init(MemoryRegion *sysmem, hwaddr base, int feat, uint32_t freq, qemu_irq ch0_irq, qemu_irq ch1_irq, qemu_irq ch2_irq0, qemu_irq ch2_irq1) { tmu012_state *s; int timer_feat = (feat & TMU012_FEAT_EXTCLK) ? TIMER_FEAT_EXTCLK : 0; - s = (tmu012_state *)g_malloc0(sizeof(tmu012_state)); + s = g_malloc0(sizeof(*s)); s->feat = feat; s->timer[0] = sh_timer_init(freq, timer_feat, ch0_irq); s->timer[1] = sh_timer_init(freq, timer_feat, ch1_irq); @@ -354,15 +360,14 @@ void tmu012_init(MemoryRegion *sysmem, hwaddr base, ch2_irq0); /* ch2_irq1 not supported */ } - memory_region_init_io(&s->iomem, NULL, &tmu012_ops, s, - "timer", 0x100000000ULL); + memory_region_init_io(&s->iomem, NULL, &tmu012_ops, s, "timer", 0x30); memory_region_init_alias(&s->iomem_p4, NULL, "timer-p4", - &s->iomem, 0, 0x1000); + &s->iomem, 0, memory_region_size(&s->iomem)); memory_region_add_subregion(sysmem, P4ADDR(base), &s->iomem_p4); memory_region_init_alias(&s->iomem_a7, NULL, "timer-a7", - &s->iomem, 0, 0x1000); + &s->iomem, 0, memory_region_size(&s->iomem)); memory_region_add_subregion(sysmem, A7ADDR(base), &s->iomem_a7); /* ??? Save/restore. 
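
For quick reference, the TCR.TPSC prescaler switch reworked in sh_timer_write() above selects a fixed divisor of the peripheral clock. The throwaway sketch below (plain C, not part of the patch) tabulates that mapping; the 33 MHz input clock is an arbitrary example value, only the shift table matches the hunk above.

/*
 * TPSC 0..4 correspond to freq >>= 2/4/6/8/10 in the model, i.e.
 * peripheral clock divided by 4, 16, 64, 256 or 1024.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t tmu_counter_freq(uint32_t pclk_hz, unsigned tpsc)
{
    static const unsigned shift[] = { 2, 4, 6, 8, 10 };

    if (tpsc >= sizeof(shift) / sizeof(shift[0])) {
        /* 5 is reserved; 6 and 7 select the external clock in the model */
        return 0;
    }
    return pclk_hz >> shift[tpsc];
}

int main(void)
{
    for (unsigned tpsc = 0; tpsc < 5; tpsc++) {
        printf("TPSC=%u -> /%u -> %u Hz\n",
               tpsc, 1u << (2 * (tpsc + 1)), tmu_counter_freq(33000000, tpsc));
    }
    return 0;
}
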
*/ } diff --git a/hw/timer/trace-events b/hw/timer/trace-events index d0edcd2a80..3eccef8385 100644 --- a/hw/timer/trace-events +++ b/hw/timer/trace-events @@ -94,3 +94,8 @@ sifive_pwm_set_alarm(uint64_t alarm, uint64_t now) "Setting alarm to: 0x%" PRIx6 sifive_pwm_interrupt(int num) "Interrupt %d" sifive_pwm_read(uint64_t offset) "Read at address: 0x%" PRIx64 sifive_pwm_write(uint64_t data, uint64_t offset) "Write 0x%" PRIx64 " at address: 0x%" PRIx64 + +# sh_timer.c +sh_timer_start_stop(int enable, int current) "%d (%d)" +sh_timer_read(uint64_t offset) "tmu012_read 0x%" PRIx64 +sh_timer_write(uint64_t offset, uint64_t value) "tmu012_write 0x%" PRIx64 " 0x%08" PRIx64 diff --git a/hw/tpm/tpm_ppi.c b/hw/tpm/tpm_ppi.c index 362edcc5c9..274e9aa4b0 100644 --- a/hw/tpm/tpm_ppi.c +++ b/hw/tpm/tpm_ppi.c @@ -30,11 +30,14 @@ void tpm_ppi_reset(TPMPPI *tpmppi) guest_phys_blocks_init(&guest_phys_blocks); guest_phys_blocks_append(&guest_phys_blocks); QTAILQ_FOREACH(block, &guest_phys_blocks.head, next) { + hwaddr mr_offs = block->host_addr - + (uint8_t *)memory_region_get_ram_ptr(block->mr); + trace_tpm_ppi_memset(block->host_addr, block->target_end - block->target_start); memset(block->host_addr, 0, block->target_end - block->target_start); - memory_region_set_dirty(block->mr, 0, + memory_region_set_dirty(block->mr, mr_offs, block->target_end - block->target_start); } guest_phys_blocks_free(&guest_phys_blocks); diff --git a/hw/usb/bus.c b/hw/usb/bus.c index 07083349f5..92d6ed5626 100644 --- a/hw/usb/bus.c +++ b/hw/usb/bus.c @@ -2,6 +2,8 @@ #include "hw/qdev-properties.h" #include "hw/usb.h" #include "qapi/error.h" +#include "qapi/qapi-commands-machine.h" +#include "qapi/type-helpers.h" #include "qemu/error-report.h" #include "qemu/module.h" #include "sysemu/sysemu.h" @@ -82,7 +84,7 @@ const VMStateDescription vmstate_usb_device = { void usb_bus_new(USBBus *bus, size_t bus_size, USBBusOps *ops, DeviceState *host) { - qbus_create_inplace(bus, bus_size, TYPE_USB_BUS, host, NULL); + qbus_init(bus, bus_size, TYPE_USB_BUS, host, NULL); qbus_set_bus_hotplug_handler(BUS(bus)); bus->ops = ops; bus->busnr = next_usb_bus++; @@ -631,15 +633,16 @@ static char *usb_get_fw_dev_path(DeviceState *qdev) return fw_path; } -void hmp_info_usb(Monitor *mon, const QDict *qdict) +HumanReadableText *qmp_x_query_usb(Error **errp) { + g_autoptr(GString) buf = g_string_new(""); USBBus *bus; USBDevice *dev; USBPort *port; if (QTAILQ_EMPTY(&busses)) { - monitor_printf(mon, "USB support not enabled\n"); - return; + error_setg(errp, "USB support not enabled"); + return NULL; } QTAILQ_FOREACH(bus, &busses, next) { @@ -647,14 +650,17 @@ void hmp_info_usb(Monitor *mon, const QDict *qdict) dev = port->dev; if (!dev) continue; - monitor_printf(mon, " Device %d.%d, Port %s, Speed %s Mb/s, " - "Product %s%s%s\n", - bus->busnr, dev->addr, port->path, - usb_speed(dev->speed), dev->product_desc, - dev->qdev.id ? ", ID: " : "", - dev->qdev.id ?: ""); + g_string_append_printf(buf, + " Device %d.%d, Port %s, Speed %s Mb/s, " + "Product %s%s%s\n", + bus->busnr, dev->addr, port->path, + usb_speed(dev->speed), dev->product_desc, + dev->qdev.id ? 
", ID: " : "", + dev->qdev.id ?: ""); } } + + return human_readable_text_from_str(buf); } /* handle legacy -usbdevice cmd line option */ diff --git a/hw/usb/dev-smartcard-reader.c b/hw/usb/dev-smartcard-reader.c index f7043075be..91ffd9f8ae 100644 --- a/hw/usb/dev-smartcard-reader.c +++ b/hw/usb/dev-smartcard-reader.c @@ -1320,8 +1320,7 @@ static void ccid_realize(USBDevice *dev, Error **errp) usb_desc_create_serial(dev); usb_desc_init(dev); - qbus_create_inplace(&s->bus, sizeof(s->bus), TYPE_CCID_BUS, DEVICE(dev), - NULL); + qbus_init(&s->bus, sizeof(s->bus), TYPE_CCID_BUS, DEVICE(dev), NULL); qbus_set_hotplug_handler(BUS(&s->bus), OBJECT(dev)); s->intr = usb_ep_get(dev, USB_TOKEN_IN, CCID_INT_IN_EP); s->bulk = usb_ep_get(dev, USB_TOKEN_IN, CCID_BULK_IN_EP); diff --git a/hw/usb/dev-storage-bot.c b/hw/usb/dev-storage-bot.c index 68ebaca10c..b24b3148c2 100644 --- a/hw/usb/dev-storage-bot.c +++ b/hw/usb/dev-storage-bot.c @@ -37,8 +37,7 @@ static void usb_msd_bot_realize(USBDevice *dev, Error **errp) s->dev.auto_attach = 0; } - scsi_bus_new(&s->bus, sizeof(s->bus), DEVICE(dev), - &usb_msd_scsi_info_bot, NULL); + scsi_bus_init(&s->bus, sizeof(s->bus), DEVICE(dev), &usb_msd_scsi_info_bot); usb_msd_handle_reset(dev); } diff --git a/hw/usb/dev-storage-classic.c b/hw/usb/dev-storage-classic.c index 3d017a4e67..00f25bade2 100644 --- a/hw/usb/dev-storage-classic.c +++ b/hw/usb/dev-storage-classic.c @@ -65,8 +65,8 @@ static void usb_msd_storage_realize(USBDevice *dev, Error **errp) usb_desc_create_serial(dev); usb_desc_init(dev); dev->flags |= (1 << USB_DEV_FLAG_IS_SCSI_STORAGE); - scsi_bus_new(&s->bus, sizeof(s->bus), DEVICE(dev), - &usb_msd_scsi_info_storage, NULL); + scsi_bus_init(&s->bus, sizeof(s->bus), DEVICE(dev), + &usb_msd_scsi_info_storage); scsi_dev = scsi_bus_legacy_add_drive(&s->bus, blk, 0, !!s->removable, s->conf.bootindex, s->conf.share_rw, s->conf.rerror, s->conf.werror, diff --git a/hw/usb/dev-uas.c b/hw/usb/dev-uas.c index f6309a5ebf..599d6b52a0 100644 --- a/hw/usb/dev-uas.c +++ b/hw/usb/dev-uas.c @@ -938,8 +938,7 @@ static void usb_uas_realize(USBDevice *dev, Error **errp) uas->status_bh = qemu_bh_new(usb_uas_send_status_bh, uas); dev->flags |= (1 << USB_DEV_FLAG_IS_SCSI_STORAGE); - scsi_bus_new(&uas->bus, sizeof(uas->bus), DEVICE(dev), - &usb_uas_scsi_info, NULL); + scsi_bus_init(&uas->bus, sizeof(uas->bus), DEVICE(dev), &usb_uas_scsi_info); } static const VMStateDescription vmstate_usb_uas = { diff --git a/hw/usb/hcd-uhci.c b/hw/usb/hcd-uhci.c index 0cb02a6432..d1b5657d72 100644 --- a/hw/usb/hcd-uhci.c +++ b/hw/usb/hcd-uhci.c @@ -31,6 +31,7 @@ #include "hw/usb/uhci-regs.h" #include "migration/vmstate.h" #include "hw/pci/pci.h" +#include "hw/irq.h" #include "hw/qdev-properties.h" #include "qapi/error.h" #include "qemu/timer.h" @@ -290,7 +291,7 @@ static UHCIAsync *uhci_async_find_td(UHCIState *s, uint32_t td_addr) static void uhci_update_irq(UHCIState *s) { - int level; + int level = 0; if (((s->status2 & 1) && (s->intr & (1 << 2))) || ((s->status2 & 2) && (s->intr & (1 << 3))) || ((s->status & UHCI_STS_USBERR) && (s->intr & (1 << 0))) || @@ -298,10 +299,8 @@ static void uhci_update_irq(UHCIState *s) (s->status & UHCI_STS_HSERR) || (s->status & UHCI_STS_HCPERR)) { level = 1; - } else { - level = 0; } - pci_set_irq(&s->dev, level); + qemu_set_irq(s->irq, level); } static void uhci_reset(DeviceState *dev) @@ -1170,9 +1169,9 @@ void usb_uhci_common_realize(PCIDevice *dev, Error **errp) pci_conf[PCI_CLASS_PROG] = 0x00; /* TODO: reset value should be 0. 
*/ - pci_conf[USB_SBRN] = USB_RELEASE_1; // release number - + pci_conf[USB_SBRN] = USB_RELEASE_1; /* release number */ pci_config_set_interrupt_pin(pci_conf, u->info.irq_pin + 1); + s->irq = pci_allocate_irq(dev); if (s->masterbus) { USBPort *ports[NB_PORTS]; @@ -1285,6 +1284,9 @@ void uhci_data_class_init(ObjectClass *klass, void *data) } else { device_class_set_props(dc, uhci_properties_standalone); } + if (info->notuser) { + dc->user_creatable = false; + } u->info = *info; } diff --git a/hw/usb/hcd-uhci.h b/hw/usb/hcd-uhci.h index e61d8fcb19..c85ab7868e 100644 --- a/hw/usb/hcd-uhci.h +++ b/hw/usb/hcd-uhci.h @@ -60,7 +60,7 @@ typedef struct UHCIState { uint32_t frame_bandwidth; bool completions_only; UHCIPort ports[NB_PORTS]; - + qemu_irq irq; /* Interrupts that should be raised at the end of the current frame. */ uint32_t pending_int_mask; @@ -85,6 +85,7 @@ typedef struct UHCIInfo { uint8_t irq_pin; void (*realize)(PCIDevice *dev, Error **errp); bool unplug; + bool notuser; /* disallow user_creatable */ } UHCIInfo; void uhci_data_class_init(ObjectClass *klass, void *data); diff --git a/hw/usb/vt82c686-uhci-pci.c b/hw/usb/vt82c686-uhci-pci.c index b109c21603..0bf2b72ff0 100644 --- a/hw/usb/vt82c686-uhci-pci.c +++ b/hw/usb/vt82c686-uhci-pci.c @@ -1,6 +1,17 @@ #include "qemu/osdep.h" +#include "hw/irq.h" +#include "hw/isa/vt82c686.h" #include "hcd-uhci.h" +static void uhci_isa_set_irq(void *opaque, int irq_num, int level) +{ + UHCIState *s = opaque; + uint8_t irq = pci_get_byte(s->dev.config + PCI_INTERRUPT_LINE); + if (irq > 0 && irq < 15) { + via_isa_set_irq(pci_get_function_0(&s->dev), irq, level); + } +} + static void usb_uhci_vt82c686b_realize(PCIDevice *dev, Error **errp) { UHCIState *s = UHCI(dev); @@ -14,6 +25,8 @@ static void usb_uhci_vt82c686b_realize(PCIDevice *dev, Error **errp) pci_set_long(pci_conf + 0xc0, 0x00002000); usb_uhci_common_realize(dev, errp); + object_unref(s->irq); + s->irq = qemu_allocate_irq(uhci_isa_set_irq, s, 0); } static UHCIInfo uhci_info[] = { @@ -25,6 +38,8 @@ static UHCIInfo uhci_info[] = { .irq_pin = 3, .realize = usb_uhci_vt82c686b_realize, .unplug = true, + /* Reason: only works as USB function of VT82xx superio chips */ + .notuser = true, } }; diff --git a/hw/vfio/common.c b/hw/vfio/common.c index 8728d4d5c2..dd387b0d39 100644 --- a/hw/vfio/common.c +++ b/hw/vfio/common.c @@ -562,6 +562,7 @@ static bool vfio_listener_skipped_section(MemoryRegionSection *section) { return (!memory_region_is_ram(section->mr) && !memory_region_is_iommu(section->mr)) || + memory_region_is_protected(section->mr) || /* * Sizing an enabled 64-bit BAR can cause spurious mappings to * addresses in the upper part of the 64-bit address space. 
These @@ -892,6 +893,13 @@ static void vfio_listener_region_add(MemoryListener *listener, llend = int128_and(llend, int128_exts64(qemu_real_host_page_mask)); if (int128_ge(int128_make64(iova), llend)) { + if (memory_region_is_ram_device(section->mr)) { + trace_vfio_listener_region_add_no_dma_map( + memory_region_name(section->mr), + section->offset_within_address_space, + int128_getlo(section->size), + qemu_real_host_page_size); + } return; } end = int128_get64(int128_sub(llend, int128_one())); @@ -1434,6 +1442,7 @@ static void vfio_listener_log_sync(MemoryListener *listener, } static const MemoryListener vfio_memory_listener = { + .name = "vfio", .region_add = vfio_listener_region_add, .region_del = vfio_listener_region_del, .log_global_start = vfio_listener_log_global_start, diff --git a/hw/vfio/pci.c b/hw/vfio/pci.c index 4feaa1cb68..7b45353ce2 100644 --- a/hw/vfio/pci.c +++ b/hw/vfio/pci.c @@ -29,10 +29,10 @@ #include "hw/qdev-properties.h" #include "hw/qdev-properties-system.h" #include "migration/vmstate.h" +#include "qapi/qmp/qdict.h" #include "qemu/error-report.h" #include "qemu/main-loop.h" #include "qemu/module.h" -#include "qemu/option.h" #include "qemu/range.h" #include "qemu/units.h" #include "sysemu/kvm.h" @@ -941,7 +941,7 @@ static void vfio_pci_size_rom(VFIOPCIDevice *vdev) } if (vfio_opt_rom_in_denylist(vdev)) { - if (dev->opts && qemu_opt_get(dev->opts, "rombar")) { + if (dev->opts && qdict_haskey(dev->opts, "rombar")) { warn_report("Device at %s is known to cause system instability" " issues during option rom execution", vdev->vbasedev.name); @@ -2453,7 +2453,12 @@ static int vfio_pci_load_config(VFIODevice *vbasedev, QEMUFile *f) { VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev); PCIDevice *pdev = &vdev->pdev; - int ret; + pcibus_t old_addr[PCI_NUM_REGIONS - 1]; + int bar, ret; + + for (bar = 0; bar < PCI_ROM_SLOT; bar++) { + old_addr[bar] = pdev->io_regions[bar].addr; + } ret = vmstate_load_state(f, &vmstate_vfio_pci_config, vdev, 1); if (ret) { @@ -2463,6 +2468,18 @@ static int vfio_pci_load_config(VFIODevice *vbasedev, QEMUFile *f) vfio_pci_write_config(pdev, PCI_COMMAND, pci_get_word(pdev->config + PCI_COMMAND), 2); + for (bar = 0; bar < PCI_ROM_SLOT; bar++) { + /* + * The address may not be changed in some scenarios + * (e.g. the VF driver isn't loaded in VM). 
+ */ + if (old_addr[bar] != pdev->io_regions[bar].addr && + vdev->bars[bar].region.size > 0 && + vdev->bars[bar].region.size < qemu_real_host_page_size) { + vfio_sub_page_bar_update_mapping(pdev, bar); + } + } + if (msi_enabled(pdev)) { vfio_msi_enable(vdev); } else if (msix_enabled(pdev)) { diff --git a/hw/vfio/spapr.c b/hw/vfio/spapr.c index ea3f70bd2f..04c6e67f8f 100644 --- a/hw/vfio/spapr.c +++ b/hw/vfio/spapr.c @@ -136,6 +136,7 @@ static void vfio_prereg_listener_region_del(MemoryListener *listener, } const MemoryListener vfio_prereg_listener = { + .name = "vfio-pre-reg", .region_add = vfio_prereg_listener_region_add, .region_del = vfio_prereg_listener_region_del, }; diff --git a/hw/virtio/Kconfig b/hw/virtio/Kconfig index 35ab45e209..c144d42f9b 100644 --- a/hw/virtio/Kconfig +++ b/hw/virtio/Kconfig @@ -63,3 +63,8 @@ config VHOST_USER_I2C bool default y depends on VIRTIO && VHOST_USER + +config VHOST_USER_RNG + bool + default y + depends on VIRTIO && VHOST_USER diff --git a/hw/virtio/meson.build b/hw/virtio/meson.build index bc352a6009..521f7d64a8 100644 --- a/hw/virtio/meson.build +++ b/hw/virtio/meson.build @@ -27,6 +27,8 @@ virtio_ss.add(when: 'CONFIG_VIRTIO_IOMMU', if_true: files('virtio-iommu.c')) virtio_ss.add(when: 'CONFIG_VIRTIO_MEM', if_true: files('virtio-mem.c')) virtio_ss.add(when: 'CONFIG_VHOST_USER_I2C', if_true: files('vhost-user-i2c.c')) virtio_ss.add(when: ['CONFIG_VIRTIO_PCI', 'CONFIG_VHOST_USER_I2C'], if_true: files('vhost-user-i2c-pci.c')) +virtio_ss.add(when: 'CONFIG_VHOST_USER_RNG', if_true: files('vhost-user-rng.c')) +virtio_ss.add(when: ['CONFIG_VHOST_USER_RNG', 'CONFIG_VIRTIO_PCI'], if_true: files('vhost-user-rng-pci.c')) virtio_pci_ss = ss.source_set() virtio_pci_ss.add(when: 'CONFIG_VHOST_VSOCK', if_true: files('vhost-vsock-pci.c')) diff --git a/hw/virtio/trace-events b/hw/virtio/trace-events index 8ed19e9d0c..650e521e35 100644 --- a/hw/virtio/trace-events +++ b/hw/virtio/trace-events @@ -52,6 +52,7 @@ vhost_vdpa_set_vring_call(void *dev, unsigned int index, int fd) "dev: %p index: vhost_vdpa_get_features(void *dev, uint64_t features) "dev: %p features: 0x%"PRIx64 vhost_vdpa_set_owner(void *dev) "dev: %p" vhost_vdpa_vq_get_addr(void *dev, void *vq, uint64_t desc_user_addr, uint64_t avail_user_addr, uint64_t used_user_addr) "dev: %p vq: %p desc_user_addr: 0x%"PRIx64" avail_user_addr: 0x%"PRIx64" used_user_addr: 0x%"PRIx64 +vhost_vdpa_get_iova_range(void *dev, uint64_t first, uint64_t last) "dev: %p first: 0x%"PRIx64" last: 0x%"PRIx64 # virtio.c virtqueue_alloc_element(void *elem, size_t sz, unsigned in_num, unsigned out_num) "elem %p size %zd in_num %u out_num %u" diff --git a/hw/virtio/vhost-user-rng-pci.c b/hw/virtio/vhost-user-rng-pci.c new file mode 100644 index 0000000000..c83dc86813 --- /dev/null +++ b/hw/virtio/vhost-user-rng-pci.c @@ -0,0 +1,79 @@ +/* + * Vhost-user RNG virtio device PCI glue + * + * Copyright (c) 2021 Mathieu Poirier + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "hw/qdev-properties.h" +#include "hw/virtio/vhost-user-rng.h" +#include "virtio-pci.h" + +struct VHostUserRNGPCI { + VirtIOPCIProxy parent_obj; + VHostUserRNG vdev; +}; + +typedef struct VHostUserRNGPCI VHostUserRNGPCI; + +#define TYPE_VHOST_USER_RNG_PCI "vhost-user-rng-pci-base" + +DECLARE_INSTANCE_CHECKER(VHostUserRNGPCI, VHOST_USER_RNG_PCI, + TYPE_VHOST_USER_RNG_PCI) + +static Property vhost_user_rng_pci_properties[] = { + DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, + DEV_NVECTORS_UNSPECIFIED), + 
DEFINE_PROP_END_OF_LIST(), +}; + +static void vhost_user_rng_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp) +{ + VHostUserRNGPCI *dev = VHOST_USER_RNG_PCI(vpci_dev); + DeviceState *vdev = DEVICE(&dev->vdev); + + if (vpci_dev->nvectors == DEV_NVECTORS_UNSPECIFIED) { + vpci_dev->nvectors = 1; + } + + qdev_realize(vdev, BUS(&vpci_dev->bus), errp); +} + +static void vhost_user_rng_pci_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass); + PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass); + k->realize = vhost_user_rng_pci_realize; + set_bit(DEVICE_CATEGORY_INPUT, dc->categories); + device_class_set_props(dc, vhost_user_rng_pci_properties); + pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET; + pcidev_k->device_id = 0; /* Set by virtio-pci based on virtio id */ + pcidev_k->revision = 0x00; + pcidev_k->class_id = PCI_CLASS_OTHERS; +} + +static void vhost_user_rng_pci_instance_init(Object *obj) +{ + VHostUserRNGPCI *dev = VHOST_USER_RNG_PCI(obj); + + virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev), + TYPE_VHOST_USER_RNG); +} + +static const VirtioPCIDeviceTypeInfo vhost_user_rng_pci_info = { + .base_name = TYPE_VHOST_USER_RNG_PCI, + .non_transitional_name = "vhost-user-rng-pci", + .instance_size = sizeof(VHostUserRNGPCI), + .instance_init = vhost_user_rng_pci_instance_init, + .class_init = vhost_user_rng_pci_class_init, +}; + +static void vhost_user_rng_pci_register(void) +{ + virtio_pci_types_register(&vhost_user_rng_pci_info); +} + +type_init(vhost_user_rng_pci_register); diff --git a/hw/virtio/vhost-user-rng.c b/hw/virtio/vhost-user-rng.c new file mode 100644 index 0000000000..209ee5bf9a --- /dev/null +++ b/hw/virtio/vhost-user-rng.c @@ -0,0 +1,289 @@ +/* + * Vhost-user RNG virtio device + * + * Copyright (c) 2021 Mathieu Poirier + * + * Implementation seriously tailored on vhost-user-i2c.c + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "hw/qdev-properties.h" +#include "hw/virtio/virtio-bus.h" +#include "hw/virtio/vhost-user-rng.h" +#include "qemu/error-report.h" +#include "standard-headers/linux/virtio_ids.h" + +static void vu_rng_start(VirtIODevice *vdev) +{ + VHostUserRNG *rng = VHOST_USER_RNG(vdev); + BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev))); + VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus); + int ret; + int i; + + if (!k->set_guest_notifiers) { + error_report("binding does not support guest notifiers"); + return; + } + + ret = vhost_dev_enable_notifiers(&rng->vhost_dev, vdev); + if (ret < 0) { + error_report("Error enabling host notifiers: %d", -ret); + return; + } + + ret = k->set_guest_notifiers(qbus->parent, rng->vhost_dev.nvqs, true); + if (ret < 0) { + error_report("Error binding guest notifier: %d", -ret); + goto err_host_notifiers; + } + + rng->vhost_dev.acked_features = vdev->guest_features; + ret = vhost_dev_start(&rng->vhost_dev, vdev); + if (ret < 0) { + error_report("Error starting vhost-user-rng: %d", -ret); + goto err_guest_notifiers; + } + + /* + * guest_notifier_mask/pending not used yet, so just unmask + * everything here. virtio-pci will do the right thing by + * enabling/disabling irqfd. 
+ */ + for (i = 0; i < rng->vhost_dev.nvqs; i++) { + vhost_virtqueue_mask(&rng->vhost_dev, vdev, i, false); + } + + return; + +err_guest_notifiers: + k->set_guest_notifiers(qbus->parent, rng->vhost_dev.nvqs, false); +err_host_notifiers: + vhost_dev_disable_notifiers(&rng->vhost_dev, vdev); +} + +static void vu_rng_stop(VirtIODevice *vdev) +{ + VHostUserRNG *rng = VHOST_USER_RNG(vdev); + BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev))); + VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus); + int ret; + + if (!k->set_guest_notifiers) { + return; + } + + vhost_dev_stop(&rng->vhost_dev, vdev); + + ret = k->set_guest_notifiers(qbus->parent, rng->vhost_dev.nvqs, false); + if (ret < 0) { + error_report("vhost guest notifier cleanup failed: %d", ret); + return; + } + + vhost_dev_disable_notifiers(&rng->vhost_dev, vdev); +} + +static void vu_rng_set_status(VirtIODevice *vdev, uint8_t status) +{ + VHostUserRNG *rng = VHOST_USER_RNG(vdev); + bool should_start = status & VIRTIO_CONFIG_S_DRIVER_OK; + + if (!vdev->vm_running) { + should_start = false; + } + + if (rng->vhost_dev.started == should_start) { + return; + } + + if (should_start) { + vu_rng_start(vdev); + } else { + vu_rng_stop(vdev); + } +} + +static uint64_t vu_rng_get_features(VirtIODevice *vdev, + uint64_t requested_features, Error **errp) +{ + /* No feature bits used yet */ + return requested_features; +} + +static void vu_rng_handle_output(VirtIODevice *vdev, VirtQueue *vq) +{ + /* + * Not normally called; it's the daemon that handles the queue; + * however virtio's cleanup path can call this. + */ +} + +static void vu_rng_guest_notifier_mask(VirtIODevice *vdev, int idx, bool mask) +{ + VHostUserRNG *rng = VHOST_USER_RNG(vdev); + + vhost_virtqueue_mask(&rng->vhost_dev, vdev, idx, mask); +} + +static bool vu_rng_guest_notifier_pending(VirtIODevice *vdev, int idx) +{ + VHostUserRNG *rng = VHOST_USER_RNG(vdev); + + return vhost_virtqueue_pending(&rng->vhost_dev, idx); +} + +static void vu_rng_connect(DeviceState *dev) +{ + VirtIODevice *vdev = VIRTIO_DEVICE(dev); + VHostUserRNG *rng = VHOST_USER_RNG(vdev); + + if (rng->connected) { + return; + } + + rng->connected = true; + + /* restore vhost state */ + if (virtio_device_started(vdev, vdev->status)) { + vu_rng_start(vdev); + } +} + +static void vu_rng_disconnect(DeviceState *dev) +{ + VirtIODevice *vdev = VIRTIO_DEVICE(dev); + VHostUserRNG *rng = VHOST_USER_RNG(vdev); + + if (!rng->connected) { + return; + } + + rng->connected = false; + + if (rng->vhost_dev.started) { + vu_rng_stop(vdev); + } +} + +static void vu_rng_event(void *opaque, QEMUChrEvent event) +{ + DeviceState *dev = opaque; + + switch (event) { + case CHR_EVENT_OPENED: + vu_rng_connect(dev); + break; + case CHR_EVENT_CLOSED: + vu_rng_disconnect(dev); + break; + case CHR_EVENT_BREAK: + case CHR_EVENT_MUX_IN: + case CHR_EVENT_MUX_OUT: + /* Ignore */ + break; + } +} + +static void vu_rng_device_realize(DeviceState *dev, Error **errp) +{ + VirtIODevice *vdev = VIRTIO_DEVICE(dev); + VHostUserRNG *rng = VHOST_USER_RNG(dev); + int ret; + + if (!rng->chardev.chr) { + error_setg(errp, "missing chardev"); + return; + } + + if (!vhost_user_init(&rng->vhost_user, &rng->chardev, errp)) { + return; + } + + virtio_init(vdev, "vhost-user-rng", VIRTIO_ID_RNG, 0); + + rng->req_vq = virtio_add_queue(vdev, 4, vu_rng_handle_output); + if (!rng->req_vq) { + error_setg_errno(errp, -1, "virtio_add_queue() failed"); + goto virtio_add_queue_failed; + } + + rng->vhost_dev.nvqs = 1; + rng->vhost_dev.vqs = g_new0(struct vhost_virtqueue, 
rng->vhost_dev.nvqs); + ret = vhost_dev_init(&rng->vhost_dev, &rng->vhost_user, + VHOST_BACKEND_TYPE_USER, 0, errp); + if (ret < 0) { + error_setg_errno(errp, -ret, "vhost_dev_init() failed"); + goto vhost_dev_init_failed; + } + + qemu_chr_fe_set_handlers(&rng->chardev, NULL, NULL, vu_rng_event, NULL, + dev, NULL, true); + + return; + +vhost_dev_init_failed: + virtio_delete_queue(rng->req_vq); +virtio_add_queue_failed: + virtio_cleanup(vdev); + vhost_user_cleanup(&rng->vhost_user); +} + +static void vu_rng_device_unrealize(DeviceState *dev) +{ + VirtIODevice *vdev = VIRTIO_DEVICE(dev); + VHostUserRNG *rng = VHOST_USER_RNG(dev); + + vu_rng_set_status(vdev, 0); + + vhost_dev_cleanup(&rng->vhost_dev); + g_free(rng->vhost_dev.vqs); + rng->vhost_dev.vqs = NULL; + virtio_delete_queue(rng->req_vq); + virtio_cleanup(vdev); + vhost_user_cleanup(&rng->vhost_user); +} + +static const VMStateDescription vu_rng_vmstate = { + .name = "vhost-user-rng", + .unmigratable = 1, +}; + +static Property vu_rng_properties[] = { + DEFINE_PROP_CHR("chardev", VHostUserRNG, chardev), + DEFINE_PROP_END_OF_LIST(), +}; + +static void vu_rng_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass); + + device_class_set_props(dc, vu_rng_properties); + dc->vmsd = &vu_rng_vmstate; + set_bit(DEVICE_CATEGORY_INPUT, dc->categories); + + vdc->realize = vu_rng_device_realize; + vdc->unrealize = vu_rng_device_unrealize; + vdc->get_features = vu_rng_get_features; + vdc->set_status = vu_rng_set_status; + vdc->guest_notifier_mask = vu_rng_guest_notifier_mask; + vdc->guest_notifier_pending = vu_rng_guest_notifier_pending; +} + +static const TypeInfo vu_rng_info = { + .name = TYPE_VHOST_USER_RNG, + .parent = TYPE_VIRTIO_DEVICE, + .instance_size = sizeof(VHostUserRNG), + .class_init = vu_rng_class_init, +}; + +static void vu_rng_register_types(void) +{ + type_register_static(&vu_rng_info); +} + +type_init(vu_rng_register_types) diff --git a/hw/virtio/vhost-user-vsock.c b/hw/virtio/vhost-user-vsock.c index 6095ed7349..52bd682c34 100644 --- a/hw/virtio/vhost-user-vsock.c +++ b/hw/virtio/vhost-user-vsock.c @@ -81,7 +81,9 @@ static uint64_t vuv_get_features(VirtIODevice *vdev, { VHostVSockCommon *vvc = VHOST_VSOCK_COMMON(vdev); - return vhost_get_features(&vvc->vhost_dev, user_feature_bits, features); + features = vhost_get_features(&vvc->vhost_dev, user_feature_bits, features); + + return vhost_vsock_common_get_features(vdev, features, errp); } static const VMStateDescription vuv_vmstate = { diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c index 2c8556237f..bf6e50223c 100644 --- a/hw/virtio/vhost-user.c +++ b/hw/virtio/vhost-user.c @@ -1526,8 +1526,9 @@ static int vhost_user_slave_handle_vring_host_notifier(struct vhost_dev *dev, name = g_strdup_printf("vhost-user/host-notifier@%p mmaps[%d]", user, queue_idx); - memory_region_init_ram_device_ptr(&n->mr, OBJECT(vdev), name, - page_size, addr); + if (!n->mr.ram) /* Don't init again after suspend. 
*/ + memory_region_init_ram_device_ptr(&n->mr, OBJECT(vdev), name, + page_size, addr); g_free(name); if (virtio_queue_set_host_notifier_mr(vdev, queue_idx, &n->mr, true)) { diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c index 7633ea66d1..0d8051426c 100644 --- a/hw/virtio/vhost-vdpa.c +++ b/hw/virtio/vhost-vdpa.c @@ -24,19 +24,49 @@ #include "trace.h" #include "qemu-common.h" -static bool vhost_vdpa_listener_skipped_section(MemoryRegionSection *section) +/* + * Return one past the end of the end of section. Be careful with uint64_t + * conversions! + */ +static Int128 vhost_vdpa_section_end(const MemoryRegionSection *section) { - return (!memory_region_is_ram(section->mr) && - !memory_region_is_iommu(section->mr)) || - /* vhost-vDPA doesn't allow MMIO to be mapped */ - memory_region_is_ram_device(section->mr) || - /* - * Sizing an enabled 64-bit BAR can cause spurious mappings to - * addresses in the upper part of the 64-bit address space. These - * are never accessed by the CPU and beyond the address width of - * some IOMMU hardware. TODO: VDPA should tell us the IOMMU width. - */ - section->offset_within_address_space & (1ULL << 63); + Int128 llend = int128_make64(section->offset_within_address_space); + llend = int128_add(llend, section->size); + llend = int128_and(llend, int128_exts64(TARGET_PAGE_MASK)); + + return llend; +} + +static bool vhost_vdpa_listener_skipped_section(MemoryRegionSection *section, + uint64_t iova_min, + uint64_t iova_max) +{ + Int128 llend; + + if ((!memory_region_is_ram(section->mr) && + !memory_region_is_iommu(section->mr)) || + memory_region_is_protected(section->mr) || + /* vhost-vDPA doesn't allow MMIO to be mapped */ + memory_region_is_ram_device(section->mr)) { + return true; + } + + if (section->offset_within_address_space < iova_min) { + error_report("RAM section out of device range (min=0x%" PRIx64 + ", addr=0x%" HWADDR_PRIx ")", + iova_min, section->offset_within_address_space); + return true; + } + + llend = vhost_vdpa_section_end(section); + if (int128_gt(llend, int128_make64(iova_max))) { + error_report("RAM section out of device range (max=0x%" PRIx64 + ", end addr=0x%" PRIx64 ")", + iova_max, int128_get64(llend)); + return true; + } + + return false; } static int vhost_vdpa_dma_map(struct vhost_vdpa *v, hwaddr iova, hwaddr size, @@ -148,7 +178,8 @@ static void vhost_vdpa_listener_region_add(MemoryListener *listener, void *vaddr; int ret; - if (vhost_vdpa_listener_skipped_section(section)) { + if (vhost_vdpa_listener_skipped_section(section, v->iova_range.first, + v->iova_range.last)) { return; } @@ -159,10 +190,7 @@ static void vhost_vdpa_listener_region_add(MemoryListener *listener, } iova = TARGET_PAGE_ALIGN(section->offset_within_address_space); - llend = int128_make64(section->offset_within_address_space); - llend = int128_add(llend, section->size); - llend = int128_and(llend, int128_exts64(TARGET_PAGE_MASK)); - + llend = vhost_vdpa_section_end(section); if (int128_ge(int128_make64(iova), llend)) { return; } @@ -209,7 +237,8 @@ static void vhost_vdpa_listener_region_del(MemoryListener *listener, Int128 llend, llsize; int ret; - if (vhost_vdpa_listener_skipped_section(section)) { + if (vhost_vdpa_listener_skipped_section(section, v->iova_range.first, + v->iova_range.last)) { return; } @@ -220,9 +249,7 @@ static void vhost_vdpa_listener_region_del(MemoryListener *listener, } iova = TARGET_PAGE_ALIGN(section->offset_within_address_space); - llend = int128_make64(section->offset_within_address_space); - llend = int128_add(llend, 
section->size); - llend = int128_and(llend, int128_exts64(TARGET_PAGE_MASK)); + llend = vhost_vdpa_section_end(section); trace_vhost_vdpa_listener_region_del(v, iova, int128_get64(llend)); @@ -246,6 +273,7 @@ static void vhost_vdpa_listener_region_del(MemoryListener *listener, * depends on the addnop(). */ static const MemoryListener vhost_vdpa_memory_listener = { + .name = "vhost-vdpa", .commit = vhost_vdpa_listener_commit, .region_add = vhost_vdpa_listener_region_add, .region_del = vhost_vdpa_listener_region_del, @@ -278,11 +306,42 @@ static void vhost_vdpa_add_status(struct vhost_dev *dev, uint8_t status) vhost_vdpa_call(dev, VHOST_VDPA_SET_STATUS, &s); } +static void vhost_vdpa_get_iova_range(struct vhost_vdpa *v) +{ + int ret = vhost_vdpa_call(v->dev, VHOST_VDPA_GET_IOVA_RANGE, + &v->iova_range); + if (ret != 0) { + v->iova_range.first = 0; + v->iova_range.last = UINT64_MAX; + } + + trace_vhost_vdpa_get_iova_range(v->dev, v->iova_range.first, + v->iova_range.last); +} + +static bool vhost_vdpa_one_time_request(struct vhost_dev *dev) +{ + struct vhost_vdpa *v = dev->opaque; + + return v->index != 0; +} + static int vhost_vdpa_init(struct vhost_dev *dev, void *opaque, Error **errp) { struct vhost_vdpa *v; assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA); trace_vhost_vdpa_init(dev, opaque); + int ret; + + /* + * Similar to VFIO, we end up pinning all guest memory and have to + * disable discarding of RAM. + */ + ret = ram_block_discard_disable(true); + if (ret) { + error_report("Cannot set discarding of RAM broken"); + return ret; + } v = opaque; v->dev = dev; @@ -290,6 +349,12 @@ static int vhost_vdpa_init(struct vhost_dev *dev, void *opaque, Error **errp) v->listener = vhost_vdpa_memory_listener; v->msg_type = VHOST_IOTLB_MSG_V2; + vhost_vdpa_get_iova_range(v); + + if (vhost_vdpa_one_time_request(dev)) { + return 0; + } + vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE | VIRTIO_CONFIG_S_DRIVER); @@ -388,6 +453,8 @@ static int vhost_vdpa_cleanup(struct vhost_dev *dev) memory_listener_unregister(&v->listener); dev->opaque = NULL; + ram_block_discard_disable(false); + return 0; } @@ -400,6 +467,10 @@ static int vhost_vdpa_memslots_limit(struct vhost_dev *dev) static int vhost_vdpa_set_mem_table(struct vhost_dev *dev, struct vhost_memory *mem) { + if (vhost_vdpa_one_time_request(dev)) { + return 0; + } + trace_vhost_vdpa_set_mem_table(dev, mem->nregions, mem->padding); if (trace_event_get_state_backends(TRACE_VHOST_VDPA_SET_MEM_TABLE) && trace_event_get_state_backends(TRACE_VHOST_VDPA_DUMP_REGIONS)) { @@ -423,6 +494,11 @@ static int vhost_vdpa_set_features(struct vhost_dev *dev, uint64_t features) { int ret; + + if (vhost_vdpa_one_time_request(dev)) { + return 0; + } + trace_vhost_vdpa_set_features(dev, features); ret = vhost_vdpa_call(dev, VHOST_SET_FEATURES, &features); uint8_t status = 0; @@ -447,9 +523,12 @@ static int vhost_vdpa_set_backend_cap(struct vhost_dev *dev) } features &= f; - r = vhost_vdpa_call(dev, VHOST_SET_BACKEND_FEATURES, &features); - if (r) { - return -EFAULT; + + if (vhost_vdpa_one_time_request(dev)) { + r = vhost_vdpa_call(dev, VHOST_SET_BACKEND_FEATURES, &features); + if (r) { + return -EFAULT; + } } dev->backend_cap = features; @@ -480,8 +559,8 @@ static int vhost_vdpa_get_vq_index(struct vhost_dev *dev, int idx) { assert(idx >= dev->vq_index && idx < dev->vq_index + dev->nvqs); - trace_vhost_vdpa_get_vq_index(dev, idx, idx - dev->vq_index); - return idx - dev->vq_index; + trace_vhost_vdpa_get_vq_index(dev, idx, idx); + return idx; } static 
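
The IOVA range check added to vhost_vdpa_listener_skipped_section() above can be illustrated with a self-contained sketch. The plain C program below uses the GCC/Clang unsigned __int128 extension instead of QEMU's Int128 helpers; the 4 KiB page mask and the 48-bit usable IOVA range are made-up example values.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* One past the end of the section, aligned down to a page boundary and
 * computed in 128 bits so ranges touching the top of the 64-bit space
 * do not wrap (cf. vhost_vdpa_section_end() above). */
static unsigned __int128 section_end(uint64_t start, uint64_t size)
{
    unsigned __int128 llend = (unsigned __int128)start + size;

    return llend & ~(unsigned __int128)0xfff;   /* illustrative 4 KiB pages */
}

/* Mirrors the shape of the patched test: reject sections that start below
 * the first usable IOVA or whose end exceeds the last one (int128_gt()). */
static bool skipped(uint64_t start, uint64_t size,
                    uint64_t iova_min, uint64_t iova_max)
{
    if (start < iova_min) {
        return true;
    }
    return section_end(start, size) > (unsigned __int128)iova_max;
}

int main(void)
{
    uint64_t min = 0, max = (1ull << 48) - 1;   /* example usable IOVA range */

    printf("4G..5G -> skipped=%d\n", skipped(1ull << 32, 1ull << 30, min, max));
    printf(">2^48  -> skipped=%d\n", skipped(1ull << 48, 0x1000, min, max));
    return 0;
}
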
int vhost_vdpa_set_vring_ready(struct vhost_dev *dev) @@ -558,11 +637,21 @@ static int vhost_vdpa_dev_start(struct vhost_dev *dev, bool started) { struct vhost_vdpa *v = dev->opaque; trace_vhost_vdpa_dev_start(dev, started); + + if (started) { + vhost_vdpa_host_notifiers_init(dev); + vhost_vdpa_set_vring_ready(dev); + } else { + vhost_vdpa_host_notifiers_uninit(dev, dev->nvqs); + } + + if (dev->vq_index + dev->nvqs != dev->last_index) { + return 0; + } + if (started) { uint8_t status = 0; memory_listener_register(&v->listener, &address_space_memory); - vhost_vdpa_host_notifiers_init(dev); - vhost_vdpa_set_vring_ready(dev); vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK); vhost_vdpa_call(dev, VHOST_VDPA_GET_STATUS, &status); @@ -571,7 +660,6 @@ static int vhost_vdpa_dev_start(struct vhost_dev *dev, bool started) vhost_vdpa_reset_device(dev); vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE | VIRTIO_CONFIG_S_DRIVER); - vhost_vdpa_host_notifiers_uninit(dev, dev->nvqs); memory_listener_unregister(&v->listener); return 0; @@ -581,6 +669,10 @@ static int vhost_vdpa_dev_start(struct vhost_dev *dev, bool started) static int vhost_vdpa_set_log_base(struct vhost_dev *dev, uint64_t base, struct vhost_log *log) { + if (vhost_vdpa_one_time_request(dev)) { + return 0; + } + trace_vhost_vdpa_set_log_base(dev, base, log->size, log->refcnt, log->fd, log->log); return vhost_vdpa_call(dev, VHOST_SET_LOG_BASE, &base); @@ -646,6 +738,10 @@ static int vhost_vdpa_get_features(struct vhost_dev *dev, static int vhost_vdpa_set_owner(struct vhost_dev *dev) { + if (vhost_vdpa_one_time_request(dev)) { + return 0; + } + trace_vhost_vdpa_set_owner(dev); return vhost_vdpa_call(dev, VHOST_SET_OWNER, NULL); } diff --git a/hw/virtio/vhost-vsock-common.c b/hw/virtio/vhost-vsock-common.c index 4ad6e234ad..3f3771274e 100644 --- a/hw/virtio/vhost-vsock-common.c +++ b/hw/virtio/vhost-vsock-common.c @@ -18,6 +18,30 @@ #include "qemu/iov.h" #include "monitor/monitor.h" +const int feature_bits[] = { + VIRTIO_VSOCK_F_SEQPACKET, + VHOST_INVALID_FEATURE_BIT +}; + +uint64_t vhost_vsock_common_get_features(VirtIODevice *vdev, uint64_t features, + Error **errp) +{ + VHostVSockCommon *vvc = VHOST_VSOCK_COMMON(vdev); + + if (vvc->seqpacket != ON_OFF_AUTO_OFF) { + virtio_add_feature(&features, VIRTIO_VSOCK_F_SEQPACKET); + } + + features = vhost_get_features(&vvc->vhost_dev, feature_bits, features); + + if (vvc->seqpacket == ON_OFF_AUTO_ON && + !virtio_has_feature(features, VIRTIO_VSOCK_F_SEQPACKET)) { + error_setg(errp, "vhost-vsock backend doesn't support seqpacket"); + } + + return features; +} + int vhost_vsock_common_start(VirtIODevice *vdev) { VHostVSockCommon *vvc = VHOST_VSOCK_COMMON(vdev); @@ -231,11 +255,18 @@ void vhost_vsock_common_unrealize(VirtIODevice *vdev) virtio_cleanup(vdev); } +static Property vhost_vsock_common_properties[] = { + DEFINE_PROP_ON_OFF_AUTO("seqpacket", VHostVSockCommon, seqpacket, + ON_OFF_AUTO_AUTO), + DEFINE_PROP_END_OF_LIST(), +}; + static void vhost_vsock_common_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass); + device_class_set_props(dc, vhost_vsock_common_properties); set_bit(DEVICE_CATEGORY_MISC, dc->categories); vdc->guest_notifier_mask = vhost_vsock_common_guest_notifier_mask; vdc->guest_notifier_pending = vhost_vsock_common_guest_notifier_pending; diff --git a/hw/virtio/vhost-vsock.c b/hw/virtio/vhost-vsock.c index 1b1a5c70ed..478c0c9a87 100644 --- a/hw/virtio/vhost-vsock.c +++ 
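
The seqpacket negotiation introduced by vhost_vsock_common_get_features() above can be summarised in a standalone sketch. This is plain C and not QEMU code: the enum constants mimic QEMU's OnOffAuto, F_SEQPACKET stands in for VIRTIO_VSOCK_F_SEQPACKET, and "backend" plays the role of the feature set vhost_get_features() would leave enabled.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef enum { ON_OFF_AUTO_AUTO, ON_OFF_AUTO_ON, ON_OFF_AUTO_OFF } OnOffAuto;

#define F_SEQPACKET (1ull << 1)

/* Request the bit unless the property is "off", mask with what the backend
 * supports, and fail only when the user explicitly asked for "on" and the
 * backend cannot provide it. */
static bool negotiate(OnOffAuto seqpacket, uint64_t backend, uint64_t *out)
{
    uint64_t features = 0;

    if (seqpacket != ON_OFF_AUTO_OFF) {
        features |= F_SEQPACKET;            /* requested for auto and on */
    }
    features &= backend;                    /* what the vhost backend accepts */
    if (seqpacket == ON_OFF_AUTO_ON && !(features & F_SEQPACKET)) {
        return false;                       /* explicit "on" but unsupported */
    }
    *out = features;
    return true;
}

int main(void)
{
    uint64_t f;

    printf("auto, old backend -> ok=%d\n", negotiate(ON_OFF_AUTO_AUTO, 0, &f));
    printf("on,   old backend -> ok=%d\n", negotiate(ON_OFF_AUTO_ON, 0, &f));
    printf("on,   new backend -> ok=%d\n", negotiate(ON_OFF_AUTO_ON, F_SEQPACKET, &f));
    return 0;
}
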
b/hw/virtio/vhost-vsock.c @@ -21,11 +21,6 @@ #include "hw/virtio/vhost-vsock.h" #include "monitor/monitor.h" -const int feature_bits[] = { - VIRTIO_VSOCK_F_SEQPACKET, - VHOST_INVALID_FEATURE_BIT -}; - static void vhost_vsock_get_config(VirtIODevice *vdev, uint8_t *config) { VHostVSock *vsock = VHOST_VSOCK(vdev); @@ -113,11 +108,7 @@ static uint64_t vhost_vsock_get_features(VirtIODevice *vdev, uint64_t requested_features, Error **errp) { - VHostVSockCommon *vvc = VHOST_VSOCK_COMMON(vdev); - - virtio_add_feature(&requested_features, VIRTIO_VSOCK_F_SEQPACKET); - return vhost_get_features(&vvc->vhost_dev, feature_bits, - requested_features); + return vhost_vsock_common_get_features(vdev, requested_features, errp); } static const VMStateDescription vmstate_virtio_vhost_vsock = { diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c index b4b29413e6..437347ad01 100644 --- a/hw/virtio/vhost.c +++ b/hw/virtio/vhost.c @@ -1366,6 +1366,7 @@ int vhost_dev_init(struct vhost_dev *hdev, void *opaque, hdev->features = features; hdev->memory_listener = (MemoryListener) { + .name = "vhost", .begin = vhost_begin, .commit = vhost_commit, .region_add = vhost_region_addnop, @@ -1381,6 +1382,7 @@ int vhost_dev_init(struct vhost_dev *hdev, void *opaque, }; hdev->iommu_listener = (MemoryListener) { + .name = "vhost-iommu", .region_add = vhost_iommu_region_add, .region_del = vhost_iommu_region_del, }; diff --git a/hw/virtio/virtio-balloon.c b/hw/virtio/virtio-balloon.c index 5a69dce35d..c6962fcbfe 100644 --- a/hw/virtio/virtio-balloon.c +++ b/hw/virtio/virtio-balloon.c @@ -852,7 +852,7 @@ static const VMStateDescription vmstate_virtio_balloon_free_page_hint = { }; static const VMStateDescription vmstate_virtio_balloon_page_poison = { - .name = "vitio-balloon-device/page-poison", + .name = "virtio-balloon-device/page-poison", .version_id = 1, .minimum_version_id = 1, .needed = virtio_balloon_page_poison_support, diff --git a/hw/virtio/virtio-iommu-pci.c b/hw/virtio/virtio-iommu-pci.c index 770c286be7..a160ae6b41 100644 --- a/hw/virtio/virtio-iommu-pci.c +++ b/hw/virtio/virtio-iommu-pci.c @@ -98,9 +98,7 @@ static void virtio_iommu_pci_instance_init(Object *obj) } static const VirtioPCIDeviceTypeInfo virtio_iommu_pci_info = { - .base_name = TYPE_VIRTIO_IOMMU_PCI, - .generic_name = "virtio-iommu-pci", - .non_transitional_name = "virtio-iommu-pci-non-transitional", + .generic_name = TYPE_VIRTIO_IOMMU_PCI, .instance_size = sizeof(VirtIOIOMMUPCI), .instance_init = virtio_iommu_pci_instance_init, .class_init = virtio_iommu_pci_class_init, diff --git a/hw/virtio/virtio-mem-pci.c b/hw/virtio/virtio-mem-pci.c index fa5395cd88..be2383b0c5 100644 --- a/hw/virtio/virtio-mem-pci.c +++ b/hw/virtio/virtio-mem-pci.c @@ -87,14 +87,12 @@ static void virtio_mem_pci_size_change_notify(Notifier *notifier, void *data) VirtIOMEMPCI *pci_mem = container_of(notifier, VirtIOMEMPCI, size_change_notifier); DeviceState *dev = DEVICE(pci_mem); + char *qom_path = object_get_canonical_path(OBJECT(dev)); const uint64_t * const size_p = data; - const char *id = NULL; - if (dev->id) { - id = g_strdup(dev->id); - } - - qapi_event_send_memory_device_size_change(!!id, id, *size_p); + qapi_event_send_memory_device_size_change(!!dev->id, dev->id, *size_p, + qom_path); + g_free(qom_path); } static void virtio_mem_pci_class_init(ObjectClass *klass, void *data) diff --git a/hw/virtio/virtio-mem.c b/hw/virtio/virtio-mem.c index df91e454b2..d5a578142b 100644 --- a/hw/virtio/virtio-mem.c +++ b/hw/virtio/virtio-mem.c @@ -228,6 +228,38 @@ static int 
virtio_mem_for_each_plugged_section(const VirtIOMEM *vmem, return ret; } +static int virtio_mem_for_each_unplugged_section(const VirtIOMEM *vmem, + MemoryRegionSection *s, + void *arg, + virtio_mem_section_cb cb) +{ + unsigned long first_bit, last_bit; + uint64_t offset, size; + int ret = 0; + + first_bit = s->offset_within_region / vmem->bitmap_size; + first_bit = find_next_zero_bit(vmem->bitmap, vmem->bitmap_size, first_bit); + while (first_bit < vmem->bitmap_size) { + MemoryRegionSection tmp = *s; + + offset = first_bit * vmem->block_size; + last_bit = find_next_bit(vmem->bitmap, vmem->bitmap_size, + first_bit + 1) - 1; + size = (last_bit - first_bit + 1) * vmem->block_size; + + if (!virito_mem_intersect_memory_section(&tmp, offset, size)) { + break; + } + ret = cb(&tmp, arg); + if (ret) { + break; + } + first_bit = find_next_zero_bit(vmem->bitmap, vmem->bitmap_size, + last_bit + 2); + } + return ret; +} + static int virtio_mem_notify_populate_cb(MemoryRegionSection *s, void *arg) { RamDiscardListener *rdl = arg; @@ -744,7 +776,6 @@ static void virtio_mem_device_realize(DeviceState *dev, Error **errp) host_memory_backend_set_mapped(vmem->memdev, true); vmstate_register_ram(&vmem->memdev->mr, DEVICE(vmem)); qemu_register_reset(virtio_mem_system_reset, vmem); - precopy_add_notifier(&vmem->precopy_notifier); /* * Set ourselves as RamDiscardManager before the plug handler maps the @@ -764,7 +795,6 @@ static void virtio_mem_device_unrealize(DeviceState *dev) * found via an address space anymore. Unset ourselves. */ memory_region_set_ram_discard_manager(&vmem->memdev->mr, NULL); - precopy_remove_notifier(&vmem->precopy_notifier); qemu_unregister_reset(virtio_mem_system_reset, vmem); vmstate_unregister_ram(&vmem->memdev->mr, DEVICE(vmem)); host_memory_backend_set_mapped(vmem->memdev, false); @@ -1057,43 +1087,11 @@ static void virtio_mem_set_block_size(Object *obj, Visitor *v, const char *name, vmem->block_size = value; } -static int virtio_mem_precopy_exclude_range_cb(const VirtIOMEM *vmem, void *arg, - uint64_t offset, uint64_t size) -{ - void * const host = qemu_ram_get_host_addr(vmem->memdev->mr.ram_block); - - qemu_guest_free_page_hint(host + offset, size); - return 0; -} - -static void virtio_mem_precopy_exclude_unplugged(VirtIOMEM *vmem) -{ - virtio_mem_for_each_unplugged_range(vmem, NULL, - virtio_mem_precopy_exclude_range_cb); -} - -static int virtio_mem_precopy_notify(NotifierWithReturn *n, void *data) -{ - VirtIOMEM *vmem = container_of(n, VirtIOMEM, precopy_notifier); - PrecopyNotifyData *pnd = data; - - switch (pnd->reason) { - case PRECOPY_NOTIFY_AFTER_BITMAP_SYNC: - virtio_mem_precopy_exclude_unplugged(vmem); - break; - default: - break; - } - - return 0; -} - static void virtio_mem_instance_init(Object *obj) { VirtIOMEM *vmem = VIRTIO_MEM(obj); notifier_list_init(&vmem->size_change_notifiers); - vmem->precopy_notifier.notify = virtio_mem_precopy_notify; QLIST_INIT(&vmem->rdl_list); object_property_add(obj, VIRTIO_MEM_SIZE_PROP, "size", virtio_mem_get_size, @@ -1170,6 +1168,31 @@ static int virtio_mem_rdm_replay_populated(const RamDiscardManager *rdm, virtio_mem_rdm_replay_populated_cb); } +static int virtio_mem_rdm_replay_discarded_cb(MemoryRegionSection *s, + void *arg) +{ + struct VirtIOMEMReplayData *data = arg; + + ((ReplayRamDiscard)data->fn)(s, data->opaque); + return 0; +} + +static void virtio_mem_rdm_replay_discarded(const RamDiscardManager *rdm, + MemoryRegionSection *s, + ReplayRamDiscard replay_fn, + void *opaque) +{ + const VirtIOMEM *vmem = VIRTIO_MEM(rdm); + 
struct VirtIOMEMReplayData data = { + .fn = replay_fn, + .opaque = opaque, + }; + + g_assert(s->mr == &vmem->memdev->mr); + virtio_mem_for_each_unplugged_section(vmem, s, &data, + virtio_mem_rdm_replay_discarded_cb); +} + static void virtio_mem_rdm_register_listener(RamDiscardManager *rdm, RamDiscardListener *rdl, MemoryRegionSection *s) @@ -1234,6 +1257,7 @@ static void virtio_mem_class_init(ObjectClass *klass, void *data) rdmc->get_min_granularity = virtio_mem_rdm_get_min_granularity; rdmc->is_populated = virtio_mem_rdm_is_populated; rdmc->replay_populated = virtio_mem_rdm_replay_populated; + rdmc->replay_discarded = virtio_mem_rdm_replay_discarded; rdmc->register_listener = virtio_mem_rdm_register_listener; rdmc->unregister_listener = virtio_mem_rdm_unregister_listener; } diff --git a/hw/virtio/virtio-mmio.c b/hw/virtio/virtio-mmio.c index 1af48a1b04..7b3ebca178 100644 --- a/hw/virtio/virtio-mmio.c +++ b/hw/virtio/virtio-mmio.c @@ -733,8 +733,7 @@ static void virtio_mmio_realizefn(DeviceState *d, Error **errp) VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d); SysBusDevice *sbd = SYS_BUS_DEVICE(d); - qbus_create_inplace(&proxy->bus, sizeof(proxy->bus), TYPE_VIRTIO_MMIO_BUS, - d, NULL); + qbus_init(&proxy->bus, sizeof(proxy->bus), TYPE_VIRTIO_MMIO_BUS, d, NULL); sysbus_init_irq(sbd, &proxy->irq); if (!kvm_eventfds_enabled()) { diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c index 6e16e2705c..750aa47ec1 100644 --- a/hw/virtio/virtio-pci.c +++ b/hw/virtio/virtio-pci.c @@ -2187,8 +2187,7 @@ static void virtio_pci_bus_new(VirtioBusState *bus, size_t bus_size, DeviceState *qdev = DEVICE(dev); char virtio_bus_name[] = "virtio-bus"; - qbus_create_inplace(bus, bus_size, TYPE_VIRTIO_PCI_BUS, qdev, - virtio_bus_name); + qbus_init(bus, bus_size, TYPE_VIRTIO_PCI_BUS, qdev, virtio_bus_name); } static void virtio_pci_bus_class_init(ObjectClass *klass, void *data) diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c index 3a1f6c520c..cc69a9b881 100644 --- a/hw/virtio/virtio.c +++ b/hw/virtio/virtio.c @@ -984,28 +984,23 @@ static int virtqueue_split_read_next_desc(VirtIODevice *vdev, VRingDesc *desc, return VIRTQUEUE_READ_DESC_MORE; } +/* Called within rcu_read_lock(). */ static void virtqueue_split_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes, unsigned int *out_bytes, - unsigned max_in_bytes, unsigned max_out_bytes) + unsigned max_in_bytes, unsigned max_out_bytes, + VRingMemoryRegionCaches *caches) { VirtIODevice *vdev = vq->vdev; unsigned int max, idx; unsigned int total_bufs, in_total, out_total; - VRingMemoryRegionCaches *caches; MemoryRegionCache indirect_desc_cache = MEMORY_REGION_CACHE_INVALID; int64_t len = 0; int rc; - RCU_READ_LOCK_GUARD(); - idx = vq->last_avail_idx; total_bufs = in_total = out_total = 0; max = vq->vring.num; - caches = vring_get_region_caches(vq); - if (!caches) { - goto err; - } while ((rc = virtqueue_num_heads(vq, idx)) > 0) { MemoryRegionCache *desc_cache = &caches->desc; @@ -1124,32 +1119,28 @@ static int virtqueue_packed_read_next_desc(VirtQueue *vq, return VIRTQUEUE_READ_DESC_MORE; } +/* Called within rcu_read_lock(). 
*/ static void virtqueue_packed_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes, unsigned int *out_bytes, unsigned max_in_bytes, - unsigned max_out_bytes) + unsigned max_out_bytes, + VRingMemoryRegionCaches *caches) { VirtIODevice *vdev = vq->vdev; unsigned int max, idx; unsigned int total_bufs, in_total, out_total; MemoryRegionCache *desc_cache; - VRingMemoryRegionCaches *caches; MemoryRegionCache indirect_desc_cache = MEMORY_REGION_CACHE_INVALID; int64_t len = 0; VRingPackedDesc desc; bool wrap_counter; - RCU_READ_LOCK_GUARD(); idx = vq->last_avail_idx; wrap_counter = vq->last_avail_wrap_counter; total_bufs = in_total = out_total = 0; max = vq->vring.num; - caches = vring_get_region_caches(vq); - if (!caches) { - goto err; - } for (;;) { unsigned int num_bufs = total_bufs; @@ -1250,6 +1241,8 @@ void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes, uint16_t desc_size; VRingMemoryRegionCaches *caches; + RCU_READ_LOCK_GUARD(); + if (unlikely(!vq->vring.desc)) { goto err; } @@ -1268,10 +1261,12 @@ void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes, if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) { virtqueue_packed_get_avail_bytes(vq, in_bytes, out_bytes, - max_in_bytes, max_out_bytes); + max_in_bytes, max_out_bytes, + caches); } else { virtqueue_split_get_avail_bytes(vq, in_bytes, out_bytes, - max_in_bytes, max_out_bytes); + max_in_bytes, max_out_bytes, + caches); } return; @@ -1703,6 +1698,8 @@ static unsigned int virtqueue_packed_drop_all(VirtQueue *vq) VirtIODevice *vdev = vq->vdev; VRingPackedDesc desc; + RCU_READ_LOCK_GUARD(); + caches = vring_get_region_caches(vq); if (!caches) { return 0; @@ -3670,6 +3667,7 @@ static void virtio_device_realize(DeviceState *dev, Error **errp) } vdev->listener.commit = virtio_memory_listener_commit; + vdev->listener.name = "virtio"; memory_listener_register(&vdev->listener, vdev->dma_as); } diff --git a/hw/watchdog/sbsa_gwdt.c b/hw/watchdog/sbsa_gwdt.c index d0998f8489..e49cacd0e2 100644 --- a/hw/watchdog/sbsa_gwdt.c +++ b/hw/watchdog/sbsa_gwdt.c @@ -273,8 +273,9 @@ static void wdt_sbsa_gwdt_class_init(ObjectClass *klass, void *data) dc->realize = wdt_sbsa_gwdt_realize; dc->reset = wdt_sbsa_gwdt_reset; dc->hotpluggable = false; - set_bit(DEVICE_CATEGORY_MISC, dc->categories); + set_bit(DEVICE_CATEGORY_WATCHDOG, dc->categories); dc->vmsd = &vmstate_sbsa_gwdt; + dc->desc = "SBSA-compliant generic watchdog device"; } static const TypeInfo wdt_sbsa_gwdt_info = { diff --git a/hw/watchdog/trace-events b/hw/watchdog/trace-events index c3bafbffa9..e7523e22aa 100644 --- a/hw/watchdog/trace-events +++ b/hw/watchdog/trace-events @@ -5,3 +5,7 @@ cmsdk_apb_watchdog_read(uint64_t offset, uint64_t data, unsigned size) "CMSDK AP cmsdk_apb_watchdog_write(uint64_t offset, uint64_t data, unsigned size) "CMSDK APB watchdog write: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u" cmsdk_apb_watchdog_reset(void) "CMSDK APB watchdog: reset" cmsdk_apb_watchdog_lock(uint32_t lock) "CMSDK APB watchdog: lock %" PRIu32 + +# wdt-aspeed.c +aspeed_wdt_read(uint64_t addr, uint32_t size) "@0x%" PRIx64 " size=%d" +aspeed_wdt_write(uint64_t addr, uint32_t size, uint64_t data) "@0x%" PRIx64 " size=%d value=0x%"PRIx64 diff --git a/hw/watchdog/watchdog.c b/hw/watchdog/watchdog.c index 0e98ffb73f..1437e6c5b6 100644 --- a/hw/watchdog/watchdog.c +++ b/hw/watchdog/watchdog.c @@ -76,20 +76,6 @@ int select_watchdog(const char *p) return 1; } -int select_watchdog_action(const char *p) -{ - int action; - char *qapi_value; - - qapi_value = 
g_ascii_strdown(p, -1); - action = qapi_enum_parse(&WatchdogAction_lookup, qapi_value, -1, NULL); - g_free(qapi_value); - if (action < 0) - return -1; - qmp_watchdog_set_action(action, &error_abort); - return 0; -} - WatchdogAction get_watchdog_action(void) { return watchdog_action; diff --git a/hw/watchdog/wdt_aspeed.c b/hw/watchdog/wdt_aspeed.c index 69c37af9a6..6aa6f90b66 100644 --- a/hw/watchdog/wdt_aspeed.c +++ b/hw/watchdog/wdt_aspeed.c @@ -19,6 +19,7 @@ #include "hw/sysbus.h" #include "hw/watchdog/wdt_aspeed.h" #include "migration/vmstate.h" +#include "trace.h" #define WDT_STATUS (0x00 / 4) #define WDT_RELOAD_VALUE (0x04 / 4) @@ -60,6 +61,8 @@ static uint64_t aspeed_wdt_read(void *opaque, hwaddr offset, unsigned size) { AspeedWDTState *s = ASPEED_WDT(opaque); + trace_aspeed_wdt_read(offset, size); + offset >>= 2; switch (offset) { @@ -140,6 +143,8 @@ static void aspeed_wdt_write(void *opaque, hwaddr offset, uint64_t data, AspeedWDTClass *awc = ASPEED_WDT_GET_CLASS(s); bool enable; + trace_aspeed_wdt_write(offset, size, data); + offset >>= 2; switch (offset) { @@ -288,9 +293,10 @@ static void aspeed_wdt_class_init(ObjectClass *klass, void *data) dc->desc = "ASPEED Watchdog Controller"; dc->realize = aspeed_wdt_realize; dc->reset = aspeed_wdt_reset; - set_bit(DEVICE_CATEGORY_MISC, dc->categories); + set_bit(DEVICE_CATEGORY_WATCHDOG, dc->categories); dc->vmsd = &vmstate_aspeed_wdt; device_class_set_props(dc, aspeed_wdt_properties); + dc->desc = "Aspeed watchdog device"; } static const TypeInfo aspeed_wdt_info = { diff --git a/hw/watchdog/wdt_diag288.c b/hw/watchdog/wdt_diag288.c index e135a4de8b..9e8882a11c 100644 --- a/hw/watchdog/wdt_diag288.c +++ b/hw/watchdog/wdt_diag288.c @@ -122,9 +122,10 @@ static void wdt_diag288_class_init(ObjectClass *klass, void *data) dc->unrealize = wdt_diag288_unrealize; dc->reset = wdt_diag288_reset; dc->hotpluggable = false; - set_bit(DEVICE_CATEGORY_MISC, dc->categories); + set_bit(DEVICE_CATEGORY_WATCHDOG, dc->categories); dc->vmsd = &vmstate_diag288; diag288->handle_timer = wdt_diag288_handle_timer; + dc->desc = "diag288 device for s390x platform"; } static const TypeInfo wdt_diag288_info = { diff --git a/hw/watchdog/wdt_i6300esb.c b/hw/watchdog/wdt_i6300esb.c index 4c52e3bb9e..f99a1c9d29 100644 --- a/hw/watchdog/wdt_i6300esb.c +++ b/hw/watchdog/wdt_i6300esb.c @@ -476,7 +476,8 @@ static void i6300esb_class_init(ObjectClass *klass, void *data) k->class_id = PCI_CLASS_SYSTEM_OTHER; dc->reset = i6300esb_reset; dc->vmsd = &vmstate_i6300esb; - set_bit(DEVICE_CATEGORY_MISC, dc->categories); + set_bit(DEVICE_CATEGORY_WATCHDOG, dc->categories); + dc->desc = "Intel 6300ESB"; } static const TypeInfo i6300esb_info = { diff --git a/hw/watchdog/wdt_ib700.c b/hw/watchdog/wdt_ib700.c index 177aaa503f..91d1bdc0da 100644 --- a/hw/watchdog/wdt_ib700.c +++ b/hw/watchdog/wdt_ib700.c @@ -140,7 +140,8 @@ static void wdt_ib700_class_init(ObjectClass *klass, void *data) dc->realize = wdt_ib700_realize; dc->reset = wdt_ib700_reset; dc->vmsd = &vmstate_ib700; - set_bit(DEVICE_CATEGORY_MISC, dc->categories); + set_bit(DEVICE_CATEGORY_WATCHDOG, dc->categories); + dc->desc = "iBASE 700"; } static const TypeInfo wdt_ib700_info = { diff --git a/hw/watchdog/wdt_imx2.c b/hw/watchdog/wdt_imx2.c index a5fb76308f..c3128370b5 100644 --- a/hw/watchdog/wdt_imx2.c +++ b/hw/watchdog/wdt_imx2.c @@ -280,8 +280,8 @@ static void imx2_wdt_class_init(ObjectClass *klass, void *data) dc->realize = imx2_wdt_realize; dc->reset = imx2_wdt_reset; dc->vmsd = &vmstate_imx2_wdt; - dc->desc = "i.MX 
watchdog timer"; - set_bit(DEVICE_CATEGORY_MISC, dc->categories); + dc->desc = "i.MX2 watchdog timer"; + set_bit(DEVICE_CATEGORY_WATCHDOG, dc->categories); } static const TypeInfo imx2_wdt_info = { diff --git a/hw/xen/xen-bus.c b/hw/xen/xen-bus.c index 8c588920d9..416583f130 100644 --- a/hw/xen/xen-bus.c +++ b/hw/xen/xen-bus.c @@ -1398,7 +1398,7 @@ type_init(xen_register_types) void xen_bus_init(void) { DeviceState *dev = qdev_new(TYPE_XEN_BRIDGE); - BusState *bus = qbus_create(TYPE_XEN_BUS, dev, NULL); + BusState *bus = qbus_new(TYPE_XEN_BUS, dev, NULL); sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); qbus_set_bus_hotplug_handler(bus); diff --git a/hw/xen/xen-legacy-backend.c b/hw/xen/xen-legacy-backend.c index dd8ae1452d..085fd31ef7 100644 --- a/hw/xen/xen-legacy-backend.c +++ b/hw/xen/xen-legacy-backend.c @@ -276,7 +276,8 @@ static struct XenLegacyDevice *xen_be_get_xendev(const char *type, int dom, xendev = g_malloc0(ops->size); object_initialize(&xendev->qdev, ops->size, TYPE_XENBACKEND); OBJECT(xendev)->free = g_free; - qdev_set_id(DEVICE(xendev), g_strdup_printf("xen-%s-%d", type, dev)); + qdev_set_id(DEVICE(xendev), g_strdup_printf("xen-%s-%d", type, dev), + &error_fatal); qdev_realize(DEVICE(xendev), xen_sysbus, &error_fatal); object_unref(OBJECT(xendev)); @@ -702,7 +703,7 @@ int xen_be_init(void) xen_sysdev = qdev_new(TYPE_XENSYSDEV); sysbus_realize_and_unref(SYS_BUS_DEVICE(xen_sysdev), &error_fatal); - xen_sysbus = qbus_create(TYPE_XENSYSBUS, xen_sysdev, "xen-sysbus"); + xen_sysbus = qbus_new(TYPE_XENSYSBUS, xen_sysdev, "xen-sysbus"); qbus_set_bus_hotplug_handler(xen_sysbus); return 0; diff --git a/hw/xen/xen_pt.c b/hw/xen/xen_pt.c index 232482d65f..027190fa44 100644 --- a/hw/xen/xen_pt.c +++ b/hw/xen/xen_pt.c @@ -615,8 +615,8 @@ static void xen_pt_region_update(XenPCIPassthroughState *s, } args.type = d->io_regions[bar].type; - pci_for_each_device(pci_get_bus(d), pci_dev_bus_num(d), - xen_pt_check_bar_overlap, &args); + pci_for_each_device_under_bus(pci_get_bus(d), + xen_pt_check_bar_overlap, &args); if (args.rc) { XEN_PT_WARN(d, "Region: %d (addr: 0x%"FMT_PCIBUS ", len: 0x%"FMT_PCIBUS") is overlapped.\n", @@ -689,12 +689,14 @@ static void xen_pt_io_region_del(MemoryListener *l, MemoryRegionSection *sec) } static const MemoryListener xen_pt_memory_listener = { + .name = "xen-pt-mem", .region_add = xen_pt_region_add, .region_del = xen_pt_region_del, .priority = 10, }; static const MemoryListener xen_pt_io_listener = { + .name = "xen-pt-io", .region_add = xen_pt_io_region_add, .region_del = xen_pt_io_region_del, .priority = 10, diff --git a/include/block/block.h b/include/block/block.h index 740038a892..e5dd22b034 100644 --- a/include/block/block.h +++ b/include/block/block.h @@ -383,6 +383,10 @@ int bdrv_open_backing_file(BlockDriverState *bs, QDict *parent_options, const char *bdref_key, Error **errp); BlockDriverState *bdrv_open(const char *filename, const char *reference, QDict *options, int flags, Error **errp); +BlockDriverState *bdrv_new_open_driver_opts(BlockDriver *drv, + const char *node_name, + QDict *options, int flags, + Error **errp); BlockDriverState *bdrv_new_open_driver(BlockDriver *drv, const char *node_name, int flags, Error **errp); BlockReopenQueue *bdrv_reopen_queue(BlockReopenQueue *bs_queue, @@ -751,9 +755,7 @@ bool bdrv_drain_poll(BlockDriverState *bs, bool recursive, * bdrv_drained_begin: * * Begin a quiesced section for exclusive access to the BDS, by disabling - * external request sources including NBD server and device model. 
Note that - * this doesn't block timers or coroutines from submitting more requests, which - * means block_job_pause is still necessary. + * external request sources including NBD server, block jobs, and device model. * * This function can be recursive. */ diff --git a/include/block/block_int.h b/include/block/block_int.h index 5451f89b8d..f4c75e8ba9 100644 --- a/include/block/block_int.h +++ b/include/block/block_int.h @@ -94,6 +94,9 @@ typedef struct BdrvTrackedRequest { struct BdrvTrackedRequest *waiting_for; } BdrvTrackedRequest; +int bdrv_check_qiov_request(int64_t offset, int64_t bytes, + QEMUIOVector *qiov, size_t qiov_offset, + Error **errp); int bdrv_check_request(int64_t offset, int64_t bytes, Error **errp); struct BlockDriver { @@ -232,11 +235,11 @@ struct BlockDriver { /* aio */ BlockAIOCB *(*bdrv_aio_preadv)(BlockDriverState *bs, - uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags, - BlockCompletionFunc *cb, void *opaque); + int64_t offset, int64_t bytes, QEMUIOVector *qiov, + BdrvRequestFlags flags, BlockCompletionFunc *cb, void *opaque); BlockAIOCB *(*bdrv_aio_pwritev)(BlockDriverState *bs, - uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags, - BlockCompletionFunc *cb, void *opaque); + int64_t offset, int64_t bytes, QEMUIOVector *qiov, + BdrvRequestFlags flags, BlockCompletionFunc *cb, void *opaque); BlockAIOCB *(*bdrv_aio_flush)(BlockDriverState *bs, BlockCompletionFunc *cb, void *opaque); BlockAIOCB *(*bdrv_aio_pdiscard)(BlockDriverState *bs, @@ -262,10 +265,11 @@ struct BlockDriver { * The buffer in @qiov may point directly to guest memory. */ int coroutine_fn (*bdrv_co_preadv)(BlockDriverState *bs, - uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags); + int64_t offset, int64_t bytes, QEMUIOVector *qiov, + BdrvRequestFlags flags); int coroutine_fn (*bdrv_co_preadv_part)(BlockDriverState *bs, - uint64_t offset, uint64_t bytes, - QEMUIOVector *qiov, size_t qiov_offset, int flags); + int64_t offset, int64_t bytes, + QEMUIOVector *qiov, size_t qiov_offset, BdrvRequestFlags flags); int coroutine_fn (*bdrv_co_writev)(BlockDriverState *bs, int64_t sector_num, int nb_sectors, QEMUIOVector *qiov, int flags); /** @@ -284,10 +288,11 @@ struct BlockDriver { * The buffer in @qiov may point directly to guest memory. */ int coroutine_fn (*bdrv_co_pwritev)(BlockDriverState *bs, - uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags); + int64_t offset, int64_t bytes, QEMUIOVector *qiov, + BdrvRequestFlags flags); int coroutine_fn (*bdrv_co_pwritev_part)(BlockDriverState *bs, - uint64_t offset, uint64_t bytes, - QEMUIOVector *qiov, size_t qiov_offset, int flags); + int64_t offset, int64_t bytes, QEMUIOVector *qiov, size_t qiov_offset, + BdrvRequestFlags flags); /* * Efficiently zero a region of the disk image. Typically an image format @@ -296,9 +301,9 @@ struct BlockDriver { * will be called instead. 
*/ int coroutine_fn (*bdrv_co_pwrite_zeroes)(BlockDriverState *bs, - int64_t offset, int bytes, BdrvRequestFlags flags); + int64_t offset, int64_t bytes, BdrvRequestFlags flags); int coroutine_fn (*bdrv_co_pdiscard)(BlockDriverState *bs, - int64_t offset, int bytes); + int64_t offset, int64_t bytes); /* Map [offset, offset + nbytes) range onto a child of @bs to copy from, * and invoke bdrv_co_copy_range_from(child, ...), or invoke @@ -309,10 +314,10 @@ struct BlockDriver { */ int coroutine_fn (*bdrv_co_copy_range_from)(BlockDriverState *bs, BdrvChild *src, - uint64_t offset, + int64_t offset, BdrvChild *dst, - uint64_t dst_offset, - uint64_t bytes, + int64_t dst_offset, + int64_t bytes, BdrvRequestFlags read_flags, BdrvRequestFlags write_flags); @@ -326,10 +331,10 @@ struct BlockDriver { */ int coroutine_fn (*bdrv_co_copy_range_to)(BlockDriverState *bs, BdrvChild *src, - uint64_t src_offset, + int64_t src_offset, BdrvChild *dst, - uint64_t dst_offset, - uint64_t bytes, + int64_t dst_offset, + int64_t bytes, BdrvRequestFlags read_flags, BdrvRequestFlags write_flags); @@ -434,10 +439,9 @@ struct BlockDriver { Error **errp); int coroutine_fn (*bdrv_co_pwritev_compressed)(BlockDriverState *bs, - uint64_t offset, uint64_t bytes, QEMUIOVector *qiov); + int64_t offset, int64_t bytes, QEMUIOVector *qiov); int coroutine_fn (*bdrv_co_pwritev_compressed_part)(BlockDriverState *bs, - uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, - size_t qiov_offset); + int64_t offset, int64_t bytes, QEMUIOVector *qiov, size_t qiov_offset); int (*bdrv_snapshot_create)(BlockDriverState *bs, QEMUSnapshotInfo *sn_info); @@ -670,11 +674,12 @@ typedef struct BlockLimits { * otherwise. */ uint32_t request_alignment; - /* Maximum number of bytes that can be discarded at once (since it - * is signed, it must be < 2G, if set). Must be multiple of - * pdiscard_alignment, but need not be power of 2. May be 0 if no - * inherent 32-bit limit */ - int32_t max_pdiscard; + /* + * Maximum number of bytes that can be discarded at once. Must be multiple + * of pdiscard_alignment, but need not be power of 2. May be 0 if no + * inherent 64-bit limit. + */ + int64_t max_pdiscard; /* Optimal alignment for discard requests in bytes. A power of 2 * is best but not mandatory. Must be a multiple of @@ -682,10 +687,11 @@ typedef struct BlockLimits { * that is set. May be 0 if bl.request_alignment is good enough */ uint32_t pdiscard_alignment; - /* Maximum number of bytes that can zeroized at once (since it is - * signed, it must be < 2G, if set). Must be multiple of - * pwrite_zeroes_alignment. May be 0 if no inherent 32-bit limit */ - int32_t max_pwrite_zeroes; + /* + * Maximum number of bytes that can zeroized at once. Must be multiple of + * pwrite_zeroes_alignment. 0 means no limit. + */ + int64_t max_pwrite_zeroes; /* Optimal alignment for write zeroes requests in bytes. A power * of 2 is best but not mandatory. Must be a multiple of @@ -712,6 +718,13 @@ typedef struct BlockLimits { */ uint64_t max_hw_transfer; + /* Maximal number of scatter/gather elements allowed by the hardware. + * Applies whenever transfers to the device bypass the kernel I/O + * scheduler, for example with SG_IO. If larger than max_iov + * or if zero, blk_get_max_hw_iov will fall back to max_iov. 
+ */ + int max_hw_iov; + /* memory alignment, in bytes so that no bounce buffer is needed */ size_t min_mem_alignment; diff --git a/include/block/raw-aio.h b/include/block/raw-aio.h index 251b10d273..21fc10c4c9 100644 --- a/include/block/raw-aio.h +++ b/include/block/raw-aio.h @@ -51,11 +51,13 @@ typedef struct LinuxAioState LinuxAioState; LinuxAioState *laio_init(Error **errp); void laio_cleanup(LinuxAioState *s); int coroutine_fn laio_co_submit(BlockDriverState *bs, LinuxAioState *s, int fd, - uint64_t offset, QEMUIOVector *qiov, int type); + uint64_t offset, QEMUIOVector *qiov, int type, + uint64_t dev_max_batch); void laio_detach_aio_context(LinuxAioState *s, AioContext *old_context); void laio_attach_aio_context(LinuxAioState *s, AioContext *new_context); void laio_io_plug(BlockDriverState *bs, LinuxAioState *s); -void laio_io_unplug(BlockDriverState *bs, LinuxAioState *s); +void laio_io_unplug(BlockDriverState *bs, LinuxAioState *s, + uint64_t dev_max_batch); #endif /* io_uring.c - Linux io_uring implementation */ #ifdef CONFIG_LINUX_IO_URING diff --git a/include/disas/dis-asm.h b/include/disas/dis-asm.h index 524f29196d..08e1beec85 100644 --- a/include/disas/dis-asm.h +++ b/include/disas/dis-asm.h @@ -455,8 +455,7 @@ int print_insn_crisv32 (bfd_vma, disassemble_info*); int print_insn_crisv10 (bfd_vma, disassemble_info*); int print_insn_microblaze (bfd_vma, disassemble_info*); int print_insn_ia64 (bfd_vma, disassemble_info*); -int print_insn_big_nios2 (bfd_vma, disassemble_info*); -int print_insn_little_nios2 (bfd_vma, disassemble_info*); +int print_insn_nios2(bfd_vma, disassemble_info*); int print_insn_xtensa (bfd_vma, disassemble_info*); int print_insn_riscv32 (bfd_vma, disassemble_info*); int print_insn_riscv64 (bfd_vma, disassemble_info*); diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h index 32cfb634c6..3c8e24292b 100644 --- a/include/exec/cpu-all.h +++ b/include/exec/cpu-all.h @@ -429,10 +429,10 @@ static inline bool tlb_hit(target_ulong tlb_addr, target_ulong addr) #ifdef CONFIG_TCG /* accel/tcg/cpu-exec.c */ -void dump_drift_info(void); +void dump_drift_info(GString *buf); /* accel/tcg/translate-all.c */ -void dump_exec_info(void); -void dump_opcount_info(void); +void dump_exec_info(GString *buf); +void dump_opcount_info(GString *buf); #endif /* CONFIG_TCG */ #endif /* !CONFIG_USER_ONLY */ diff --git a/include/exec/cpu_ldst.h b/include/exec/cpu_ldst.h index ce6ce82618..a878fd0105 100644 --- a/include/exec/cpu_ldst.h +++ b/include/exec/cpu_ldst.h @@ -28,10 +28,12 @@ * load: cpu_ld{sign}{size}{end}_{mmusuffix}(env, ptr) * cpu_ld{sign}{size}{end}_{mmusuffix}_ra(env, ptr, retaddr) * cpu_ld{sign}{size}{end}_mmuidx_ra(env, ptr, mmu_idx, retaddr) + * cpu_ld{sign}{size}{end}_mmu(env, ptr, oi, retaddr) * * store: cpu_st{size}{end}_{mmusuffix}(env, ptr, val) * cpu_st{size}{end}_{mmusuffix}_ra(env, ptr, val, retaddr) * cpu_st{size}{end}_mmuidx_ra(env, ptr, val, mmu_idx, retaddr) + * cpu_st{size}{end}_mmu(env, ptr, val, oi, retaddr) * * sign is: * (empty): for 32 and 64 bit sizes @@ -53,10 +55,16 @@ * The "mmuidx" suffix carries an extra mmu_idx argument that specifies * the index to use; the "data" and "code" suffixes take the index from * cpu_mmu_index(). + * + * The "mmu" suffix carries the full MemOpIdx, with both mmu_idx and the + * MemOp including alignment requirements. The alignment will be enforced. 
*/ #ifndef CPU_LDST_H #define CPU_LDST_H +#include "exec/memopidx.h" +#include "qemu/int128.h" + #if defined(CONFIG_USER_ONLY) /* sparc32plus has 64bit long but 32bit space address * this can make bad result with g2h() and h2g() @@ -118,12 +126,10 @@ typedef target_ulong abi_ptr; uint32_t cpu_ldub_data(CPUArchState *env, abi_ptr ptr); int cpu_ldsb_data(CPUArchState *env, abi_ptr ptr); - uint32_t cpu_lduw_be_data(CPUArchState *env, abi_ptr ptr); int cpu_ldsw_be_data(CPUArchState *env, abi_ptr ptr); uint32_t cpu_ldl_be_data(CPUArchState *env, abi_ptr ptr); uint64_t cpu_ldq_be_data(CPUArchState *env, abi_ptr ptr); - uint32_t cpu_lduw_le_data(CPUArchState *env, abi_ptr ptr); int cpu_ldsw_le_data(CPUArchState *env, abi_ptr ptr); uint32_t cpu_ldl_le_data(CPUArchState *env, abi_ptr ptr); @@ -131,37 +137,31 @@ uint64_t cpu_ldq_le_data(CPUArchState *env, abi_ptr ptr); uint32_t cpu_ldub_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra); int cpu_ldsb_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra); - uint32_t cpu_lduw_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra); int cpu_ldsw_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra); uint32_t cpu_ldl_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra); uint64_t cpu_ldq_be_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra); - uint32_t cpu_lduw_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra); int cpu_ldsw_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra); uint32_t cpu_ldl_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra); uint64_t cpu_ldq_le_data_ra(CPUArchState *env, abi_ptr ptr, uintptr_t ra); void cpu_stb_data(CPUArchState *env, abi_ptr ptr, uint32_t val); - void cpu_stw_be_data(CPUArchState *env, abi_ptr ptr, uint32_t val); void cpu_stl_be_data(CPUArchState *env, abi_ptr ptr, uint32_t val); void cpu_stq_be_data(CPUArchState *env, abi_ptr ptr, uint64_t val); - void cpu_stw_le_data(CPUArchState *env, abi_ptr ptr, uint32_t val); void cpu_stl_le_data(CPUArchState *env, abi_ptr ptr, uint32_t val); void cpu_stq_le_data(CPUArchState *env, abi_ptr ptr, uint64_t val); void cpu_stb_data_ra(CPUArchState *env, abi_ptr ptr, uint32_t val, uintptr_t ra); - void cpu_stw_be_data_ra(CPUArchState *env, abi_ptr ptr, uint32_t val, uintptr_t ra); void cpu_stl_be_data_ra(CPUArchState *env, abi_ptr ptr, uint32_t val, uintptr_t ra); void cpu_stq_be_data_ra(CPUArchState *env, abi_ptr ptr, uint64_t val, uintptr_t ra); - void cpu_stw_le_data_ra(CPUArchState *env, abi_ptr ptr, uint32_t val, uintptr_t ra); void cpu_stl_le_data_ra(CPUArchState *env, abi_ptr ptr, @@ -169,6 +169,157 @@ void cpu_stl_le_data_ra(CPUArchState *env, abi_ptr ptr, void cpu_stq_le_data_ra(CPUArchState *env, abi_ptr ptr, uint64_t val, uintptr_t ra); +uint32_t cpu_ldub_mmuidx_ra(CPUArchState *env, abi_ptr ptr, + int mmu_idx, uintptr_t ra); +int cpu_ldsb_mmuidx_ra(CPUArchState *env, abi_ptr ptr, + int mmu_idx, uintptr_t ra); +uint32_t cpu_lduw_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr, + int mmu_idx, uintptr_t ra); +int cpu_ldsw_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr, + int mmu_idx, uintptr_t ra); +uint32_t cpu_ldl_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr, + int mmu_idx, uintptr_t ra); +uint64_t cpu_ldq_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr, + int mmu_idx, uintptr_t ra); +uint32_t cpu_lduw_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr, + int mmu_idx, uintptr_t ra); +int cpu_ldsw_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr, + int mmu_idx, uintptr_t ra); +uint32_t cpu_ldl_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr, + int mmu_idx, uintptr_t ra); 
+uint64_t cpu_ldq_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr, + int mmu_idx, uintptr_t ra); + +void cpu_stb_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint32_t val, + int mmu_idx, uintptr_t ra); +void cpu_stw_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint32_t val, + int mmu_idx, uintptr_t ra); +void cpu_stl_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint32_t val, + int mmu_idx, uintptr_t ra); +void cpu_stq_be_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint64_t val, + int mmu_idx, uintptr_t ra); +void cpu_stw_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint32_t val, + int mmu_idx, uintptr_t ra); +void cpu_stl_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint32_t val, + int mmu_idx, uintptr_t ra); +void cpu_stq_le_mmuidx_ra(CPUArchState *env, abi_ptr ptr, uint64_t val, + int mmu_idx, uintptr_t ra); + +uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr ptr, MemOpIdx oi, uintptr_t ra); +uint16_t cpu_ldw_be_mmu(CPUArchState *env, abi_ptr ptr, + MemOpIdx oi, uintptr_t ra); +uint32_t cpu_ldl_be_mmu(CPUArchState *env, abi_ptr ptr, + MemOpIdx oi, uintptr_t ra); +uint64_t cpu_ldq_be_mmu(CPUArchState *env, abi_ptr ptr, + MemOpIdx oi, uintptr_t ra); +uint16_t cpu_ldw_le_mmu(CPUArchState *env, abi_ptr ptr, + MemOpIdx oi, uintptr_t ra); +uint32_t cpu_ldl_le_mmu(CPUArchState *env, abi_ptr ptr, + MemOpIdx oi, uintptr_t ra); +uint64_t cpu_ldq_le_mmu(CPUArchState *env, abi_ptr ptr, + MemOpIdx oi, uintptr_t ra); + +void cpu_stb_mmu(CPUArchState *env, abi_ptr ptr, uint8_t val, + MemOpIdx oi, uintptr_t ra); +void cpu_stw_be_mmu(CPUArchState *env, abi_ptr ptr, uint16_t val, + MemOpIdx oi, uintptr_t ra); +void cpu_stl_be_mmu(CPUArchState *env, abi_ptr ptr, uint32_t val, + MemOpIdx oi, uintptr_t ra); +void cpu_stq_be_mmu(CPUArchState *env, abi_ptr ptr, uint64_t val, + MemOpIdx oi, uintptr_t ra); +void cpu_stw_le_mmu(CPUArchState *env, abi_ptr ptr, uint16_t val, + MemOpIdx oi, uintptr_t ra); +void cpu_stl_le_mmu(CPUArchState *env, abi_ptr ptr, uint32_t val, + MemOpIdx oi, uintptr_t ra); +void cpu_stq_le_mmu(CPUArchState *env, abi_ptr ptr, uint64_t val, + MemOpIdx oi, uintptr_t ra); + +uint32_t cpu_atomic_cmpxchgb_mmu(CPUArchState *env, target_ulong addr, + uint32_t cmpv, uint32_t newv, + MemOpIdx oi, uintptr_t retaddr); +uint32_t cpu_atomic_cmpxchgw_le_mmu(CPUArchState *env, target_ulong addr, + uint32_t cmpv, uint32_t newv, + MemOpIdx oi, uintptr_t retaddr); +uint32_t cpu_atomic_cmpxchgl_le_mmu(CPUArchState *env, target_ulong addr, + uint32_t cmpv, uint32_t newv, + MemOpIdx oi, uintptr_t retaddr); +uint64_t cpu_atomic_cmpxchgq_le_mmu(CPUArchState *env, target_ulong addr, + uint64_t cmpv, uint64_t newv, + MemOpIdx oi, uintptr_t retaddr); +uint32_t cpu_atomic_cmpxchgw_be_mmu(CPUArchState *env, target_ulong addr, + uint32_t cmpv, uint32_t newv, + MemOpIdx oi, uintptr_t retaddr); +uint32_t cpu_atomic_cmpxchgl_be_mmu(CPUArchState *env, target_ulong addr, + uint32_t cmpv, uint32_t newv, + MemOpIdx oi, uintptr_t retaddr); +uint64_t cpu_atomic_cmpxchgq_be_mmu(CPUArchState *env, target_ulong addr, + uint64_t cmpv, uint64_t newv, + MemOpIdx oi, uintptr_t retaddr); + +#define GEN_ATOMIC_HELPER(NAME, TYPE, SUFFIX) \ +TYPE cpu_atomic_ ## NAME ## SUFFIX ## _mmu \ + (CPUArchState *env, target_ulong addr, TYPE val, \ + MemOpIdx oi, uintptr_t retaddr); + +#ifdef CONFIG_ATOMIC64 +#define GEN_ATOMIC_HELPER_ALL(NAME) \ + GEN_ATOMIC_HELPER(NAME, uint32_t, b) \ + GEN_ATOMIC_HELPER(NAME, uint32_t, w_le) \ + GEN_ATOMIC_HELPER(NAME, uint32_t, w_be) \ + GEN_ATOMIC_HELPER(NAME, uint32_t, l_le) \ + GEN_ATOMIC_HELPER(NAME, uint32_t, l_be) \ 
+ GEN_ATOMIC_HELPER(NAME, uint64_t, q_le) \ + GEN_ATOMIC_HELPER(NAME, uint64_t, q_be) +#else +#define GEN_ATOMIC_HELPER_ALL(NAME) \ + GEN_ATOMIC_HELPER(NAME, uint32_t, b) \ + GEN_ATOMIC_HELPER(NAME, uint32_t, w_le) \ + GEN_ATOMIC_HELPER(NAME, uint32_t, w_be) \ + GEN_ATOMIC_HELPER(NAME, uint32_t, l_le) \ + GEN_ATOMIC_HELPER(NAME, uint32_t, l_be) +#endif + +GEN_ATOMIC_HELPER_ALL(fetch_add) +GEN_ATOMIC_HELPER_ALL(fetch_sub) +GEN_ATOMIC_HELPER_ALL(fetch_and) +GEN_ATOMIC_HELPER_ALL(fetch_or) +GEN_ATOMIC_HELPER_ALL(fetch_xor) +GEN_ATOMIC_HELPER_ALL(fetch_smin) +GEN_ATOMIC_HELPER_ALL(fetch_umin) +GEN_ATOMIC_HELPER_ALL(fetch_smax) +GEN_ATOMIC_HELPER_ALL(fetch_umax) + +GEN_ATOMIC_HELPER_ALL(add_fetch) +GEN_ATOMIC_HELPER_ALL(sub_fetch) +GEN_ATOMIC_HELPER_ALL(and_fetch) +GEN_ATOMIC_HELPER_ALL(or_fetch) +GEN_ATOMIC_HELPER_ALL(xor_fetch) +GEN_ATOMIC_HELPER_ALL(smin_fetch) +GEN_ATOMIC_HELPER_ALL(umin_fetch) +GEN_ATOMIC_HELPER_ALL(smax_fetch) +GEN_ATOMIC_HELPER_ALL(umax_fetch) + +GEN_ATOMIC_HELPER_ALL(xchg) + +#undef GEN_ATOMIC_HELPER_ALL +#undef GEN_ATOMIC_HELPER + +Int128 cpu_atomic_cmpxchgo_le_mmu(CPUArchState *env, target_ulong addr, + Int128 cmpv, Int128 newv, + MemOpIdx oi, uintptr_t retaddr); +Int128 cpu_atomic_cmpxchgo_be_mmu(CPUArchState *env, target_ulong addr, + Int128 cmpv, Int128 newv, + MemOpIdx oi, uintptr_t retaddr); + +Int128 cpu_atomic_ldo_le_mmu(CPUArchState *env, target_ulong addr, + MemOpIdx oi, uintptr_t retaddr); +Int128 cpu_atomic_ldo_be_mmu(CPUArchState *env, target_ulong addr, + MemOpIdx oi, uintptr_t retaddr); +void cpu_atomic_sto_le_mmu(CPUArchState *env, target_ulong addr, Int128 val, + MemOpIdx oi, uintptr_t retaddr); +void cpu_atomic_sto_be_mmu(CPUArchState *env, target_ulong addr, Int128 val, + MemOpIdx oi, uintptr_t retaddr); + #if defined(CONFIG_USER_ONLY) extern __thread uintptr_t helper_retaddr; @@ -193,119 +344,6 @@ static inline void clear_helper_retaddr(void) helper_retaddr = 0; } -/* - * Provide the same *_mmuidx_ra interface as for softmmu. - * The mmu_idx argument is ignored. 
- */ - -static inline uint32_t cpu_ldub_mmuidx_ra(CPUArchState *env, abi_ptr addr, - int mmu_idx, uintptr_t ra) -{ - return cpu_ldub_data_ra(env, addr, ra); -} - -static inline int cpu_ldsb_mmuidx_ra(CPUArchState *env, abi_ptr addr, - int mmu_idx, uintptr_t ra) -{ - return cpu_ldsb_data_ra(env, addr, ra); -} - -static inline uint32_t cpu_lduw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, - int mmu_idx, uintptr_t ra) -{ - return cpu_lduw_be_data_ra(env, addr, ra); -} - -static inline int cpu_ldsw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, - int mmu_idx, uintptr_t ra) -{ - return cpu_ldsw_be_data_ra(env, addr, ra); -} - -static inline uint32_t cpu_ldl_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, - int mmu_idx, uintptr_t ra) -{ - return cpu_ldl_be_data_ra(env, addr, ra); -} - -static inline uint64_t cpu_ldq_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, - int mmu_idx, uintptr_t ra) -{ - return cpu_ldq_be_data_ra(env, addr, ra); -} - -static inline uint32_t cpu_lduw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, - int mmu_idx, uintptr_t ra) -{ - return cpu_lduw_le_data_ra(env, addr, ra); -} - -static inline int cpu_ldsw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, - int mmu_idx, uintptr_t ra) -{ - return cpu_ldsw_le_data_ra(env, addr, ra); -} - -static inline uint32_t cpu_ldl_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, - int mmu_idx, uintptr_t ra) -{ - return cpu_ldl_le_data_ra(env, addr, ra); -} - -static inline uint64_t cpu_ldq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, - int mmu_idx, uintptr_t ra) -{ - return cpu_ldq_le_data_ra(env, addr, ra); -} - -static inline void cpu_stb_mmuidx_ra(CPUArchState *env, abi_ptr addr, - uint32_t val, int mmu_idx, uintptr_t ra) -{ - cpu_stb_data_ra(env, addr, val, ra); -} - -static inline void cpu_stw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, - uint32_t val, int mmu_idx, - uintptr_t ra) -{ - cpu_stw_be_data_ra(env, addr, val, ra); -} - -static inline void cpu_stl_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, - uint32_t val, int mmu_idx, - uintptr_t ra) -{ - cpu_stl_be_data_ra(env, addr, val, ra); -} - -static inline void cpu_stq_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, - uint64_t val, int mmu_idx, - uintptr_t ra) -{ - cpu_stq_be_data_ra(env, addr, val, ra); -} - -static inline void cpu_stw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, - uint32_t val, int mmu_idx, - uintptr_t ra) -{ - cpu_stw_le_data_ra(env, addr, val, ra); -} - -static inline void cpu_stl_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, - uint32_t val, int mmu_idx, - uintptr_t ra) -{ - cpu_stl_le_data_ra(env, addr, val, ra); -} - -static inline void cpu_stq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, - uint64_t val, int mmu_idx, - uintptr_t ra) -{ - cpu_stq_le_data_ra(env, addr, val, ra); -} - #else /* Needed for TCG_OVERSIZED_GUEST */ @@ -336,46 +374,6 @@ static inline CPUTLBEntry *tlb_entry(CPUArchState *env, uintptr_t mmu_idx, return &env_tlb(env)->f[mmu_idx].table[tlb_index(env, mmu_idx, addr)]; } -uint32_t cpu_ldub_mmuidx_ra(CPUArchState *env, abi_ptr addr, - int mmu_idx, uintptr_t ra); -int cpu_ldsb_mmuidx_ra(CPUArchState *env, abi_ptr addr, - int mmu_idx, uintptr_t ra); - -uint32_t cpu_lduw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, - int mmu_idx, uintptr_t ra); -int cpu_ldsw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, - int mmu_idx, uintptr_t ra); -uint32_t cpu_ldl_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, - int mmu_idx, uintptr_t ra); -uint64_t cpu_ldq_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, - int mmu_idx, uintptr_t ra); - -uint32_t 
cpu_lduw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, - int mmu_idx, uintptr_t ra); -int cpu_ldsw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, - int mmu_idx, uintptr_t ra); -uint32_t cpu_ldl_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, - int mmu_idx, uintptr_t ra); -uint64_t cpu_ldq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, - int mmu_idx, uintptr_t ra); - -void cpu_stb_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val, - int mmu_idx, uintptr_t retaddr); - -void cpu_stw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val, - int mmu_idx, uintptr_t retaddr); -void cpu_stl_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val, - int mmu_idx, uintptr_t retaddr); -void cpu_stq_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint64_t val, - int mmu_idx, uintptr_t retaddr); - -void cpu_stw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val, - int mmu_idx, uintptr_t retaddr); -void cpu_stl_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val, - int mmu_idx, uintptr_t retaddr); -void cpu_stq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint64_t val, - int mmu_idx, uintptr_t retaddr); - #endif /* defined(CONFIG_USER_ONLY) */ #ifdef TARGET_WORDS_BIGENDIAN @@ -391,6 +389,9 @@ void cpu_stq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint64_t val, # define cpu_ldsw_mmuidx_ra cpu_ldsw_be_mmuidx_ra # define cpu_ldl_mmuidx_ra cpu_ldl_be_mmuidx_ra # define cpu_ldq_mmuidx_ra cpu_ldq_be_mmuidx_ra +# define cpu_ldw_mmu cpu_ldw_be_mmu +# define cpu_ldl_mmu cpu_ldl_be_mmu +# define cpu_ldq_mmu cpu_ldq_be_mmu # define cpu_stw_data cpu_stw_be_data # define cpu_stl_data cpu_stl_be_data # define cpu_stq_data cpu_stq_be_data @@ -400,6 +401,9 @@ void cpu_stq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint64_t val, # define cpu_stw_mmuidx_ra cpu_stw_be_mmuidx_ra # define cpu_stl_mmuidx_ra cpu_stl_be_mmuidx_ra # define cpu_stq_mmuidx_ra cpu_stq_be_mmuidx_ra +# define cpu_stw_mmu cpu_stw_be_mmu +# define cpu_stl_mmu cpu_stl_be_mmu +# define cpu_stq_mmu cpu_stq_be_mmu #else # define cpu_lduw_data cpu_lduw_le_data # define cpu_ldsw_data cpu_ldsw_le_data @@ -413,6 +417,9 @@ void cpu_stq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint64_t val, # define cpu_ldsw_mmuidx_ra cpu_ldsw_le_mmuidx_ra # define cpu_ldl_mmuidx_ra cpu_ldl_le_mmuidx_ra # define cpu_ldq_mmuidx_ra cpu_ldq_le_mmuidx_ra +# define cpu_ldw_mmu cpu_ldw_le_mmu +# define cpu_ldl_mmu cpu_ldl_le_mmu +# define cpu_ldq_mmu cpu_ldq_le_mmu # define cpu_stw_data cpu_stw_le_data # define cpu_stl_data cpu_stl_le_data # define cpu_stq_data cpu_stq_le_data @@ -422,6 +429,9 @@ void cpu_stq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint64_t val, # define cpu_stw_mmuidx_ra cpu_stw_le_mmuidx_ra # define cpu_stl_mmuidx_ra cpu_stl_le_mmuidx_ra # define cpu_stq_mmuidx_ra cpu_stq_le_mmuidx_ra +# define cpu_stw_mmu cpu_stw_le_mmu +# define cpu_stl_mmu cpu_stl_le_mmu +# define cpu_stq_mmu cpu_stq_le_mmu #endif uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr addr); diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h index 9d5987ba04..6bb2a0f7ec 100644 --- a/include/exec/exec-all.h +++ b/include/exec/exec-all.h @@ -664,16 +664,55 @@ static inline tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, } /** - * cpu_signal_handler - * @signum: host signal number - * @pinfo: host siginfo_t - * @puc: host ucontext_t + * adjust_signal_pc: + * @pc: raw pc from the host signal ucontext_t. + * @is_write: host memory operation was write, or read-modify-write. 
* - * To be called from the SIGBUS and SIGSEGV signal handler to inform the - * virtual cpu of exceptions. Returns true if the signal was handled by - * the virtual CPU. + * Alter @pc as required for unwinding. Return the type of the + * guest memory access -- host reads may be for guest execution. */ -int cpu_signal_handler(int signum, void *pinfo, void *puc); +MMUAccessType adjust_signal_pc(uintptr_t *pc, bool is_write); + +/** + * handle_sigsegv_accerr_write: + * @cpu: the cpu context + * @old_set: the sigset_t from the signal ucontext_t + * @host_pc: the host pc, adjusted for the signal + * @host_addr: the host address of the fault + * + * Return true if the write fault has been handled, and should be re-tried. + */ +bool handle_sigsegv_accerr_write(CPUState *cpu, sigset_t *old_set, + uintptr_t host_pc, abi_ptr guest_addr); + +/** + * cpu_loop_exit_sigsegv: + * @cpu: the cpu context + * @addr: the guest address of the fault + * @access_type: access was read/write/execute + * @maperr: true for invalid page, false for permission fault + * @ra: host pc for unwinding + * + * Use the TCGCPUOps hook to record cpu state, do guest operating system + * specific things to raise SIGSEGV, and jump to the main cpu loop. + */ +void QEMU_NORETURN cpu_loop_exit_sigsegv(CPUState *cpu, target_ulong addr, + MMUAccessType access_type, + bool maperr, uintptr_t ra); + +/** + * cpu_loop_exit_sigbus: + * @cpu: the cpu context + * @addr: the guest address of the alignment fault + * @access_type: access was read/write/execute + * @ra: host pc for unwinding + * + * Use the TCGCPUOps hook to record cpu state, do guest operating system + * specific things to raise SIGBUS, and jump to the main cpu loop. + */ +void QEMU_NORETURN cpu_loop_exit_sigbus(CPUState *cpu, target_ulong addr, + MMUAccessType access_type, + uintptr_t ra); #else static inline void mmap_lock(void) {} diff --git a/include/exec/memop.h b/include/exec/memop.h index 529d07b02d..04264ffd6b 100644 --- a/include/exec/memop.h +++ b/include/exec/memop.h @@ -19,11 +19,15 @@ typedef enum MemOp { MO_16 = 1, MO_32 = 2, MO_64 = 3, - MO_SIZE = 3, /* Mask for the above. */ + MO_128 = 4, + MO_256 = 5, + MO_512 = 6, + MO_1024 = 7, + MO_SIZE = 0x07, /* Mask for the above. */ - MO_SIGN = 4, /* Sign-extended, otherwise zero-extended. */ + MO_SIGN = 0x08, /* Sign-extended, otherwise zero-extended. */ - MO_BSWAP = 8, /* Host reverse endian. */ + MO_BSWAP = 0x10, /* Host reverse endian. */ #ifdef HOST_WORDS_BIGENDIAN MO_LE = MO_BSWAP, MO_BE = 0, @@ -59,8 +63,8 @@ typedef enum MemOp { * - an alignment to a specified size, which may be more or less than * the access size (MO_ALIGN_x where 'x' is a size in bytes); */ - MO_ASHIFT = 4, - MO_AMASK = 7 << MO_ASHIFT, + MO_ASHIFT = 5, + MO_AMASK = 0x7 << MO_ASHIFT, #ifdef NEED_CPU_H #ifdef TARGET_ALIGNED_ONLY MO_ALIGN = 0, diff --git a/include/exec/memopidx.h b/include/exec/memopidx.h new file mode 100644 index 0000000000..83bce97874 --- /dev/null +++ b/include/exec/memopidx.h @@ -0,0 +1,55 @@ +/* + * Combine the MemOp and mmu_idx parameters into a single value. + * + * Authors: + * Richard Henderson + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#ifndef EXEC_MEMOPIDX_H +#define EXEC_MEMOPIDX_H 1 + +#include "exec/memop.h" + +typedef uint32_t MemOpIdx; + +/** + * make_memop_idx + * @op: memory operation + * @idx: mmu index + * + * Encode these values into a single parameter. 
+ */ +static inline MemOpIdx make_memop_idx(MemOp op, unsigned idx) +{ +#ifdef CONFIG_DEBUG_TCG + assert(idx <= 15); +#endif + return (op << 4) | idx; +} + +/** + * get_memop + * @oi: combined op/idx parameter + * + * Extract the memory operation from the combined value. + */ +static inline MemOp get_memop(MemOpIdx oi) +{ + return oi >> 4; +} + +/** + * get_mmuidx + * @oi: combined op/idx parameter + * + * Extract the mmu index from the combined value. + */ +static inline unsigned get_mmuidx(MemOpIdx oi) +{ + return oi & 15; +} + +#endif diff --git a/include/exec/memory.h b/include/exec/memory.h index c3d417d317..20f1b27377 100644 --- a/include/exec/memory.h +++ b/include/exec/memory.h @@ -61,7 +61,17 @@ static inline void fuzz_dma_read_cb(size_t addr, } #endif -extern bool global_dirty_log; +/* Possible bits for global_dirty_log_{start|stop} */ + +/* Dirty tracking enabled because migration is running */ +#define GLOBAL_DIRTY_MIGRATION (1U << 0) + +/* Dirty tracking enabled because measuring dirty rate */ +#define GLOBAL_DIRTY_DIRTY_RATE (1U << 1) + +#define GLOBAL_DIRTY_MASK (0x3) + +extern unsigned int global_dirty_tracking; typedef struct MemoryRegionOps MemoryRegionOps; @@ -190,6 +200,9 @@ typedef struct IOMMUTLBEvent { */ #define RAM_NORESERVE (1 << 7) +/* RAM that isn't accessible through normal means. */ +#define RAM_PROTECTED (1 << 8) + static inline void iommu_notifier_init(IOMMUNotifier *n, IOMMUNotify fn, IOMMUNotifierFlag flags, hwaddr start, hwaddr end, @@ -537,6 +550,7 @@ static inline void ram_discard_listener_init(RamDiscardListener *rdl, } typedef int (*ReplayRamPopulate)(MemoryRegionSection *section, void *opaque); +typedef void (*ReplayRamDiscard)(MemoryRegionSection *section, void *opaque); /* * RamDiscardManagerClass: @@ -625,6 +639,21 @@ struct RamDiscardManagerClass { MemoryRegionSection *section, ReplayRamPopulate replay_fn, void *opaque); + /** + * @replay_discarded: + * + * Call the #ReplayRamDiscard callback for all discarded parts within the + * #MemoryRegionSection via the #RamDiscardManager. + * + * @rdm: the #RamDiscardManager + * @section: the #MemoryRegionSection + * @replay_fn: the #ReplayRamDiscard callback + * @opaque: pointer to forward to the callback + */ + void (*replay_discarded)(const RamDiscardManager *rdm, + MemoryRegionSection *section, + ReplayRamDiscard replay_fn, void *opaque); + /** * @register_listener: * @@ -669,6 +698,11 @@ int ram_discard_manager_replay_populated(const RamDiscardManager *rdm, ReplayRamPopulate replay_fn, void *opaque); +void ram_discard_manager_replay_discarded(const RamDiscardManager *rdm, + MemoryRegionSection *section, + ReplayRamDiscard replay_fn, + void *opaque); + void ram_discard_manager_register_listener(RamDiscardManager *rdm, RamDiscardListener *rdl, MemoryRegionSection *section); @@ -979,6 +1013,14 @@ struct MemoryListener { */ unsigned priority; + /** + * @name: + * + * Name of the listener. It can be used in contexts where we'd like to + * identify one memory listener with the rest. + */ + const char *name; + /* private: */ AddressSpace *address_space; QTAILQ_ENTRY(MemoryListener) link; @@ -1267,7 +1309,7 @@ void memory_region_init_ram_from_file(MemoryRegion *mr, * @name: the name of the region. * @size: size of the region. * @ram_flags: RamBlock flags. Supported flags: RAM_SHARED, RAM_PMEM, - * RAM_NORESERVE. + * RAM_NORESERVE, RAM_PROTECTED. * @fd: the fd to mmap. * @offset: offset within the file referenced by fd * @errp: pointer to Error*, to store an error if it happens. 
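A quick illustration of the MemOpIdx encoding introduced above in include/exec/memopidx.h (a minimal standalone sketch, not part of any patch in this series): make_memop_idx() packs the MemOp into the bits above bit 3 and the mmu index into the low four bits, so get_memop() and get_mmuidx() recover exactly the two values that were combined.

#include <assert.h>          /* plain assert() used here only for the sketch */
#include "exec/memopidx.h"   /* provides MemOpIdx, make_memop_idx, get_memop, get_mmuidx */

/* Illustration only: build a MemOpIdx for a little-endian 32-bit access
 * on mmu index 3, then recover both halves of the encoding. */
static void memopidx_round_trip(void)
{
    MemOpIdx oi = make_memop_idx(MO_LE | MO_32, 3);

    assert(get_memop(oi) == (MO_LE | MO_32));  /* MemOp lives in bits 4 and up */
    assert(get_mmuidx(oi) == 3);               /* mmu index lives in the low 4 bits */
}

Note that make_memop_idx() asserts idx <= 15 under CONFIG_DEBUG_TCG, matching the 4-bit field that get_mmuidx() extracts; callers such as the new cpu_ld*_mmu()/cpu_st*_mmu() helpers pass this combined value together with a return address for unwinding.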
@@ -1568,6 +1610,16 @@ static inline bool memory_region_is_romd(MemoryRegion *mr) return mr->rom_device && mr->romd_mode; } +/** + * memory_region_is_protected: check whether a memory region is protected + * + * Returns %true if a memory region is protected RAM and cannot be accessed + * via standard mechanisms, e.g. DMA. + * + * @mr: the memory region being queried + */ +bool memory_region_is_protected(MemoryRegion *mr); + /** * memory_region_get_iommu: check whether a memory region is an iommu * @@ -2367,13 +2419,17 @@ void memory_listener_unregister(MemoryListener *listener); /** * memory_global_dirty_log_start: begin dirty logging for all regions + * + * @flags: purpose of starting dirty log, migration or dirty rate */ -void memory_global_dirty_log_start(void); +void memory_global_dirty_log_start(unsigned int flags); /** * memory_global_dirty_log_stop: end dirty logging for all regions + * + * @flags: purpose of stopping dirty log, migration or dirty rate */ -void memory_global_dirty_log_stop(void); +void memory_global_dirty_log_stop(unsigned int flags); void mtree_info(bool flatview, bool dispatch_tree, bool owner, bool disabled); diff --git a/include/exec/plugin-gen.h b/include/exec/plugin-gen.h index b1b72b5d90..f92f169739 100644 --- a/include/exec/plugin-gen.h +++ b/include/exec/plugin-gen.h @@ -27,13 +27,21 @@ void plugin_gen_insn_end(void); void plugin_gen_disable_mem_helpers(void); void plugin_gen_empty_mem_callback(TCGv addr, uint32_t info); -static inline void plugin_insn_append(const void *from, size_t size) +static inline void plugin_insn_append(abi_ptr pc, const void *from, size_t size) { struct qemu_plugin_insn *insn = tcg_ctx->plugin_insn; + abi_ptr off; if (insn == NULL) { return; } + off = pc - insn->vaddr; + if (off < insn->data->len) { + g_byte_array_set_size(insn->data, off); + } else if (off > insn->data->len) { + /* we have an unexpected gap */ + g_assert_not_reached(); + } insn->data = g_byte_array_append(insn->data, from, size); } @@ -62,7 +70,7 @@ static inline void plugin_gen_disable_mem_helpers(void) static inline void plugin_gen_empty_mem_callback(TCGv addr, uint32_t info) { } -static inline void plugin_insn_append(const void *from, size_t size) +static inline void plugin_insn_append(abi_ptr pc, const void *from, size_t size) { } #endif /* CONFIG_PLUGIN */ diff --git a/include/exec/ram_addr.h b/include/exec/ram_addr.h index 551876bed0..64fb936c7c 100644 --- a/include/exec/ram_addr.h +++ b/include/exec/ram_addr.h @@ -26,6 +26,8 @@ #include "exec/ramlist.h" #include "exec/ramblock.h" +extern uint64_t total_dirty_pages; + /** * clear_bmap_size: calculate clear bitmap size * @@ -369,10 +371,14 @@ static inline void cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap, qatomic_or(&blocks[DIRTY_MEMORY_VGA][idx][offset], temp); - if (global_dirty_log) { + if (global_dirty_tracking) { qatomic_or( &blocks[DIRTY_MEMORY_MIGRATION][idx][offset], temp); + if (unlikely( + global_dirty_tracking & GLOBAL_DIRTY_DIRTY_RATE)) { + total_dirty_pages += ctpopl(temp); + } } if (tcg_enabled()) { @@ -392,7 +398,7 @@ static inline void cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap, } else { uint8_t clients = tcg_enabled() ? 
DIRTY_CLIENTS_ALL : DIRTY_CLIENTS_NOCODE; - if (!global_dirty_log) { + if (!global_dirty_tracking) { clients &= ~(1 << DIRTY_MEMORY_MIGRATION); } @@ -403,6 +409,9 @@ static inline void cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap, for (i = 0; i < len; i++) { if (bitmap[i] != 0) { c = leul_to_cpu(bitmap[i]); + if (unlikely(global_dirty_tracking & GLOBAL_DIRTY_DIRTY_RATE)) { + total_dirty_pages += ctpopl(c); + } do { j = ctzl(c); c &= ~(1ul << j); diff --git a/include/exec/ramlist.h b/include/exec/ramlist.h index ece6497ee2..2ad2a81acc 100644 --- a/include/exec/ramlist.h +++ b/include/exec/ramlist.h @@ -80,6 +80,6 @@ void ram_block_notify_add(void *host, size_t size, size_t max_size); void ram_block_notify_remove(void *host, size_t size, size_t max_size); void ram_block_notify_resize(void *host, size_t old_size, size_t new_size); -void ram_block_dump(Monitor *mon); +GString *ram_block_format(void); #endif /* RAMLIST_H */ diff --git a/include/fpu/softfloat-macros.h b/include/fpu/softfloat-macros.h index 81c3fe8256..f35cdbfa63 100644 --- a/include/fpu/softfloat-macros.h +++ b/include/fpu/softfloat-macros.h @@ -8,7 +8,6 @@ * so some portions are provided under: * the SoftFloat-2a license * the BSD license - * GPL-v2-or-later * * Any future contributions to this file after December 1st 2014 will be * taken to be licensed under the Softfloat-2a license unless specifically @@ -75,10 +74,6 @@ this code that are retained. * THE POSSIBILITY OF SUCH DAMAGE. */ -/* Portions of this work are licensed under the terms of the GNU GPL, - * version 2 or later. See the COPYING file in the top-level directory. - */ - #ifndef FPU_SOFTFLOAT_MACROS_H #define FPU_SOFTFLOAT_MACROS_H @@ -585,83 +580,6 @@ static inline uint64_t estimateDiv128To64(uint64_t a0, uint64_t a1, uint64_t b) } -/* From the GNU Multi Precision Library - longlong.h __udiv_qrnnd - * (https://gmplib.org/repo/gmp/file/tip/longlong.h) - * - * Licensed under the GPLv2/LGPLv3 - */ -static inline uint64_t udiv_qrnnd(uint64_t *r, uint64_t n1, - uint64_t n0, uint64_t d) -{ -#if defined(__x86_64__) - uint64_t q; - asm("divq %4" : "=a"(q), "=d"(*r) : "0"(n0), "1"(n1), "rm"(d)); - return q; -#elif defined(__s390x__) && !defined(__clang__) - /* Need to use a TImode type to get an even register pair for DLGR. */ - unsigned __int128 n = (unsigned __int128)n1 << 64 | n0; - asm("dlgr %0, %1" : "+r"(n) : "r"(d)); - *r = n >> 64; - return n; -#elif defined(_ARCH_PPC64) && defined(_ARCH_PWR7) - /* From Power ISA 2.06, programming note for divdeu. 
*/ - uint64_t q1, q2, Q, r1, r2, R; - asm("divdeu %0,%2,%4; divdu %1,%3,%4" - : "=&r"(q1), "=r"(q2) - : "r"(n1), "r"(n0), "r"(d)); - r1 = -(q1 * d); /* low part of (n1<<64) - (q1 * d) */ - r2 = n0 - (q2 * d); - Q = q1 + q2; - R = r1 + r2; - if (R >= d || R < r2) { /* overflow implies R > d */ - Q += 1; - R -= d; - } - *r = R; - return Q; -#else - uint64_t d0, d1, q0, q1, r1, r0, m; - - d0 = (uint32_t)d; - d1 = d >> 32; - - r1 = n1 % d1; - q1 = n1 / d1; - m = q1 * d0; - r1 = (r1 << 32) | (n0 >> 32); - if (r1 < m) { - q1 -= 1; - r1 += d; - if (r1 >= d) { - if (r1 < m) { - q1 -= 1; - r1 += d; - } - } - } - r1 -= m; - - r0 = r1 % d1; - q0 = r1 / d1; - m = q0 * d0; - r0 = (r0 << 32) | (uint32_t)n0; - if (r0 < m) { - q0 -= 1; - r0 += d; - if (r0 >= d) { - if (r0 < m) { - q0 -= 1; - r0 += d; - } - } - } - r0 -= m; - - *r = r0; - return (q1 << 32) | q0; -#endif -} - /*---------------------------------------------------------------------------- | Returns an approximation to the square root of the 32-bit significand given | by `a'. Considered as an integer, `a' must be at least 2^31. If bit 0 of diff --git a/include/fpu/softfloat.h b/include/fpu/softfloat.h index ec7dca0960..a249991e61 100644 --- a/include/fpu/softfloat.h +++ b/include/fpu/softfloat.h @@ -243,6 +243,8 @@ float16 float16_minnum(float16, float16, float_status *status); float16 float16_maxnum(float16, float16, float_status *status); float16 float16_minnummag(float16, float16, float_status *status); float16 float16_maxnummag(float16, float16, float_status *status); +float16 float16_minimum_number(float16, float16, float_status *status); +float16 float16_maximum_number(float16, float16, float_status *status); float16 float16_sqrt(float16, float_status *status); FloatRelation float16_compare(float16, float16, float_status *status); FloatRelation float16_compare_quiet(float16, float16, float_status *status); @@ -422,6 +424,8 @@ bfloat16 bfloat16_minnum(bfloat16, bfloat16, float_status *status); bfloat16 bfloat16_maxnum(bfloat16, bfloat16, float_status *status); bfloat16 bfloat16_minnummag(bfloat16, bfloat16, float_status *status); bfloat16 bfloat16_maxnummag(bfloat16, bfloat16, float_status *status); +bfloat16 bfloat16_minimum_number(bfloat16, bfloat16, float_status *status); +bfloat16 bfloat16_maximum_number(bfloat16, bfloat16, float_status *status); bfloat16 bfloat16_sqrt(bfloat16, float_status *status); FloatRelation bfloat16_compare(bfloat16, bfloat16, float_status *status); FloatRelation bfloat16_compare_quiet(bfloat16, bfloat16, float_status *status); @@ -589,6 +593,8 @@ float32 float32_minnum(float32, float32, float_status *status); float32 float32_maxnum(float32, float32, float_status *status); float32 float32_minnummag(float32, float32, float_status *status); float32 float32_maxnummag(float32, float32, float_status *status); +float32 float32_minimum_number(float32, float32, float_status *status); +float32 float32_maximum_number(float32, float32, float_status *status); bool float32_is_quiet_nan(float32, float_status *status); bool float32_is_signaling_nan(float32, float_status *status); float32 float32_silence_nan(float32, float_status *status); @@ -778,6 +784,8 @@ float64 float64_minnum(float64, float64, float_status *status); float64 float64_maxnum(float64, float64, float_status *status); float64 float64_minnummag(float64, float64, float_status *status); float64 float64_maxnummag(float64, float64, float_status *status); +float64 float64_minimum_number(float64, float64, float_status *status); +float64 
float64_maximum_number(float64, float64, float_status *status); bool float64_is_quiet_nan(float64 a, float_status *status); bool float64_is_signaling_nan(float64, float_status *status); float64 float64_silence_nan(float64, float_status *status); @@ -1210,6 +1218,8 @@ float128 float128_minnum(float128, float128, float_status *status); float128 float128_maxnum(float128, float128, float_status *status); float128 float128_minnummag(float128, float128, float_status *status); float128 float128_maxnummag(float128, float128, float_status *status); +float128 float128_minimum_number(float128, float128, float_status *status); +float128 float128_maximum_number(float128, float128, float_status *status); bool float128_is_quiet_nan(float128, float_status *status); bool float128_is_signaling_nan(float128, float_status *status); float128 float128_silence_nan(float128, float_status *status); diff --git a/include/hw/acpi/acpi-defs.h b/include/hw/acpi/acpi-defs.h index cf9f44299c..c97e8633ad 100644 --- a/include/hw/acpi/acpi-defs.h +++ b/include/hw/acpi/acpi-defs.h @@ -48,31 +48,6 @@ typedef struct AcpiRsdpData { unsigned *xsdt_tbl_offset; } AcpiRsdpData; -/* Table structure from Linux kernel (the ACPI tables are under the - BSD license) */ - - -#define ACPI_TABLE_HEADER_DEF /* ACPI common table header */ \ - uint32_t signature; /* ACPI signature (4 ASCII characters) */ \ - uint32_t length; /* Length of table, in bytes, including header */ \ - uint8_t revision; /* ACPI Specification minor version # */ \ - uint8_t checksum; /* To make sum of entire table == 0 */ \ - uint8_t oem_id[6] \ - QEMU_NONSTRING; /* OEM identification */ \ - uint8_t oem_table_id[8] \ - QEMU_NONSTRING; /* OEM table identification */ \ - uint32_t oem_revision; /* OEM revision number */ \ - uint8_t asl_compiler_id[4] \ - QEMU_NONSTRING; /* ASL compiler vendor ID */ \ - uint32_t asl_compiler_revision; /* ASL compiler revision number */ - - -/* ACPI common table header */ -struct AcpiTableHeader { - ACPI_TABLE_HEADER_DEF -} QEMU_PACKED; -typedef struct AcpiTableHeader AcpiTableHeader; - struct AcpiGenericAddress { uint8_t space_id; /* Address space where struct or register exists */ uint8_t bit_width; /* Size in bits of given register */ @@ -80,7 +55,7 @@ struct AcpiGenericAddress { uint8_t access_width; /* ACPI 3.0: Minimum Access size (ACPI 3.0), ACPI 2.0: Reserved, Table 5-1 */ uint64_t address; /* 64-bit address of struct or register */ -} QEMU_PACKED; +}; typedef struct AcpiFadtData { struct AcpiGenericAddress pm1a_cnt; /* PM1a_CNT_BLK */ @@ -117,505 +92,4 @@ typedef struct AcpiFadtData { #define ACPI_FADT_ARM_PSCI_COMPLIANT (1 << 0) #define ACPI_FADT_ARM_PSCI_USE_HVC (1 << 1) -/* - * Serial Port Console Redirection Table (SPCR), Rev. 1.02 - * - * For .interface_type see Debug Port Table 2 (DBG2) serial port - * subtypes in Table 3, Rev. 
May 22, 2012 - */ -struct AcpiSerialPortConsoleRedirection { - ACPI_TABLE_HEADER_DEF - uint8_t interface_type; - uint8_t reserved1[3]; - struct AcpiGenericAddress base_address; - uint8_t interrupt_types; - uint8_t irq; - uint32_t gsi; - uint8_t baud; - uint8_t parity; - uint8_t stopbits; - uint8_t flowctrl; - uint8_t term_type; - uint8_t reserved2; - uint16_t pci_device_id; - uint16_t pci_vendor_id; - uint8_t pci_bus; - uint8_t pci_slot; - uint8_t pci_func; - uint32_t pci_flags; - uint8_t pci_seg; - uint32_t reserved3; -} QEMU_PACKED; -typedef struct AcpiSerialPortConsoleRedirection - AcpiSerialPortConsoleRedirection; - -/* - * ACPI 1.0 Root System Description Table (RSDT) - */ -struct AcpiRsdtDescriptorRev1 { - ACPI_TABLE_HEADER_DEF /* ACPI common table header */ - uint32_t table_offset_entry[]; /* Array of pointers to other */ - /* ACPI tables */ -} QEMU_PACKED; -typedef struct AcpiRsdtDescriptorRev1 AcpiRsdtDescriptorRev1; - -/* - * ACPI 2.0 eXtended System Description Table (XSDT) - */ -struct AcpiXsdtDescriptorRev2 { - ACPI_TABLE_HEADER_DEF /* ACPI common table header */ - uint64_t table_offset_entry[]; /* Array of pointers to other */ - /* ACPI tables */ -} QEMU_PACKED; -typedef struct AcpiXsdtDescriptorRev2 AcpiXsdtDescriptorRev2; - -/* - * ACPI 1.0 Firmware ACPI Control Structure (FACS) - */ -struct AcpiFacsDescriptorRev1 { - uint32_t signature; /* ACPI Signature */ - uint32_t length; /* Length of structure, in bytes */ - uint32_t hardware_signature; /* Hardware configuration signature */ - uint32_t firmware_waking_vector; /* ACPI OS waking vector */ - uint32_t global_lock; /* Global Lock */ - uint32_t flags; - uint8_t resverved3 [40]; /* Reserved - must be zero */ -} QEMU_PACKED; -typedef struct AcpiFacsDescriptorRev1 AcpiFacsDescriptorRev1; - -/* - * Differentiated System Description Table (DSDT) - */ - -/* - * MADT values and structures - */ - -/* Values for MADT PCATCompat */ - -#define ACPI_DUAL_PIC 0 -#define ACPI_MULTIPLE_APIC 1 - -/* Master MADT */ - -struct AcpiMultipleApicTable { - ACPI_TABLE_HEADER_DEF /* ACPI common table header */ - uint32_t local_apic_address; /* Physical address of local APIC */ - uint32_t flags; -} QEMU_PACKED; -typedef struct AcpiMultipleApicTable AcpiMultipleApicTable; - -/* Values for Type in APIC sub-headers */ - -#define ACPI_APIC_PROCESSOR 0 -#define ACPI_APIC_IO 1 -#define ACPI_APIC_XRUPT_OVERRIDE 2 -#define ACPI_APIC_NMI 3 -#define ACPI_APIC_LOCAL_NMI 4 -#define ACPI_APIC_ADDRESS_OVERRIDE 5 -#define ACPI_APIC_IO_SAPIC 6 -#define ACPI_APIC_LOCAL_SAPIC 7 -#define ACPI_APIC_XRUPT_SOURCE 8 -#define ACPI_APIC_LOCAL_X2APIC 9 -#define ACPI_APIC_LOCAL_X2APIC_NMI 10 -#define ACPI_APIC_GENERIC_CPU_INTERFACE 11 -#define ACPI_APIC_GENERIC_DISTRIBUTOR 12 -#define ACPI_APIC_GENERIC_MSI_FRAME 13 -#define ACPI_APIC_GENERIC_REDISTRIBUTOR 14 -#define ACPI_APIC_GENERIC_TRANSLATOR 15 -#define ACPI_APIC_RESERVED 16 /* 16 and greater are reserved */ - -/* - * MADT sub-structures (Follow MULTIPLE_APIC_DESCRIPTION_TABLE) - */ -#define ACPI_SUB_HEADER_DEF /* Common ACPI sub-structure header */\ - uint8_t type; \ - uint8_t length; - -/* Sub-structures for MADT */ - -struct AcpiMadtProcessorApic { - ACPI_SUB_HEADER_DEF - uint8_t processor_id; /* ACPI processor id */ - uint8_t local_apic_id; /* Processor's local APIC id */ - uint32_t flags; -} QEMU_PACKED; -typedef struct AcpiMadtProcessorApic AcpiMadtProcessorApic; - -struct AcpiMadtIoApic { - ACPI_SUB_HEADER_DEF - uint8_t io_apic_id; /* I/O APIC ID */ - uint8_t reserved; /* Reserved - must be zero */ - uint32_t 
address; /* APIC physical address */ - uint32_t interrupt; /* Global system interrupt where INTI - * lines start */ -} QEMU_PACKED; -typedef struct AcpiMadtIoApic AcpiMadtIoApic; - -struct AcpiMadtIntsrcovr { - ACPI_SUB_HEADER_DEF - uint8_t bus; - uint8_t source; - uint32_t gsi; - uint16_t flags; -} QEMU_PACKED; -typedef struct AcpiMadtIntsrcovr AcpiMadtIntsrcovr; - -struct AcpiMadtLocalNmi { - ACPI_SUB_HEADER_DEF - uint8_t processor_id; /* ACPI processor id */ - uint16_t flags; /* MPS INTI flags */ - uint8_t lint; /* Local APIC LINT# */ -} QEMU_PACKED; -typedef struct AcpiMadtLocalNmi AcpiMadtLocalNmi; - -struct AcpiMadtProcessorX2Apic { - ACPI_SUB_HEADER_DEF - uint16_t reserved; - uint32_t x2apic_id; /* Processor's local x2APIC ID */ - uint32_t flags; - uint32_t uid; /* Processor object _UID */ -} QEMU_PACKED; -typedef struct AcpiMadtProcessorX2Apic AcpiMadtProcessorX2Apic; - -struct AcpiMadtLocalX2ApicNmi { - ACPI_SUB_HEADER_DEF - uint16_t flags; /* MPS INTI flags */ - uint32_t uid; /* Processor object _UID */ - uint8_t lint; /* Local APIC LINT# */ - uint8_t reserved[3]; /* Local APIC LINT# */ -} QEMU_PACKED; -typedef struct AcpiMadtLocalX2ApicNmi AcpiMadtLocalX2ApicNmi; - -struct AcpiMadtGenericCpuInterface { - ACPI_SUB_HEADER_DEF - uint16_t reserved; - uint32_t cpu_interface_number; - uint32_t uid; - uint32_t flags; - uint32_t parking_version; - uint32_t performance_interrupt; - uint64_t parked_address; - uint64_t base_address; - uint64_t gicv_base_address; - uint64_t gich_base_address; - uint32_t vgic_interrupt; - uint64_t gicr_base_address; - uint64_t arm_mpidr; -} QEMU_PACKED; - -typedef struct AcpiMadtGenericCpuInterface AcpiMadtGenericCpuInterface; - -/* GICC CPU Interface Flags */ -#define ACPI_MADT_GICC_ENABLED 1 - -struct AcpiMadtGenericDistributor { - ACPI_SUB_HEADER_DEF - uint16_t reserved; - uint32_t gic_id; - uint64_t base_address; - uint32_t global_irq_base; - /* ACPI 5.1 Errata 1228 Present GIC version in MADT table */ - uint8_t version; - uint8_t reserved2[3]; -} QEMU_PACKED; - -typedef struct AcpiMadtGenericDistributor AcpiMadtGenericDistributor; - -struct AcpiMadtGenericMsiFrame { - ACPI_SUB_HEADER_DEF - uint16_t reserved; - uint32_t gic_msi_frame_id; - uint64_t base_address; - uint32_t flags; - uint16_t spi_count; - uint16_t spi_base; -} QEMU_PACKED; - -typedef struct AcpiMadtGenericMsiFrame AcpiMadtGenericMsiFrame; - -struct AcpiMadtGenericRedistributor { - ACPI_SUB_HEADER_DEF - uint16_t reserved; - uint64_t base_address; - uint32_t range_length; -} QEMU_PACKED; - -typedef struct AcpiMadtGenericRedistributor AcpiMadtGenericRedistributor; - -struct AcpiMadtGenericTranslator { - ACPI_SUB_HEADER_DEF - uint16_t reserved; - uint32_t translation_id; - uint64_t base_address; - uint32_t reserved2; -} QEMU_PACKED; - -typedef struct AcpiMadtGenericTranslator AcpiMadtGenericTranslator; - -/* - * Generic Timer Description Table (GTDT) - */ -#define ACPI_GTDT_INTERRUPT_MODE_LEVEL (0 << 0) -#define ACPI_GTDT_INTERRUPT_MODE_EDGE (1 << 0) -#define ACPI_GTDT_CAP_ALWAYS_ON (1 << 2) - -struct AcpiGenericTimerTable { - ACPI_TABLE_HEADER_DEF - uint64_t counter_block_addresss; - uint32_t reserved; - uint32_t secure_el1_interrupt; - uint32_t secure_el1_flags; - uint32_t non_secure_el1_interrupt; - uint32_t non_secure_el1_flags; - uint32_t virtual_timer_interrupt; - uint32_t virtual_timer_flags; - uint32_t non_secure_el2_interrupt; - uint32_t non_secure_el2_flags; - uint64_t counter_read_block_address; - uint32_t platform_timer_count; - uint32_t platform_timer_offset; -} QEMU_PACKED; 
-typedef struct AcpiGenericTimerTable AcpiGenericTimerTable; - -/* - * HPET Description Table - */ -struct Acpi20Hpet { - ACPI_TABLE_HEADER_DEF /* ACPI common table header */ - uint32_t timer_block_id; - struct AcpiGenericAddress addr; - uint8_t hpet_number; - uint16_t min_tick; - uint8_t page_protect; -} QEMU_PACKED; -typedef struct Acpi20Hpet Acpi20Hpet; - -/* - * SRAT (NUMA topology description) table - */ - -struct AcpiSystemResourceAffinityTable { - ACPI_TABLE_HEADER_DEF - uint32_t reserved1; - uint32_t reserved2[2]; -} QEMU_PACKED; -typedef struct AcpiSystemResourceAffinityTable AcpiSystemResourceAffinityTable; - -#define ACPI_SRAT_PROCESSOR_APIC 0 -#define ACPI_SRAT_MEMORY 1 -#define ACPI_SRAT_PROCESSOR_x2APIC 2 -#define ACPI_SRAT_PROCESSOR_GICC 3 - -struct AcpiSratProcessorAffinity { - ACPI_SUB_HEADER_DEF - uint8_t proximity_lo; - uint8_t local_apic_id; - uint32_t flags; - uint8_t local_sapic_eid; - uint8_t proximity_hi[3]; - uint32_t reserved; -} QEMU_PACKED; -typedef struct AcpiSratProcessorAffinity AcpiSratProcessorAffinity; - -struct AcpiSratProcessorX2ApicAffinity { - ACPI_SUB_HEADER_DEF - uint16_t reserved; - uint32_t proximity_domain; - uint32_t x2apic_id; - uint32_t flags; - uint32_t clk_domain; - uint32_t reserved2; -} QEMU_PACKED; -typedef struct AcpiSratProcessorX2ApicAffinity AcpiSratProcessorX2ApicAffinity; - -struct AcpiSratMemoryAffinity { - ACPI_SUB_HEADER_DEF - uint32_t proximity; - uint16_t reserved1; - uint64_t base_addr; - uint64_t range_length; - uint32_t reserved2; - uint32_t flags; - uint32_t reserved3[2]; -} QEMU_PACKED; -typedef struct AcpiSratMemoryAffinity AcpiSratMemoryAffinity; - -struct AcpiSratProcessorGiccAffinity { - ACPI_SUB_HEADER_DEF - uint32_t proximity; - uint32_t acpi_processor_uid; - uint32_t flags; - uint32_t clock_domain; -} QEMU_PACKED; - -typedef struct AcpiSratProcessorGiccAffinity AcpiSratProcessorGiccAffinity; - -/* - * TCPA Description Table - * - * Following Level 00, Rev 00.37 of specs: - * http://www.trustedcomputinggroup.org/resources/tcg_acpi_specification - */ -struct Acpi20Tcpa { - ACPI_TABLE_HEADER_DEF /* ACPI common table header */ - uint16_t platform_class; - uint32_t log_area_minimum_length; - uint64_t log_area_start_address; -} QEMU_PACKED; -typedef struct Acpi20Tcpa Acpi20Tcpa; - -/* DMAR - DMA Remapping table r2.2 */ -struct AcpiTableDmar { - ACPI_TABLE_HEADER_DEF - uint8_t host_address_width; /* Maximum DMA physical addressability */ - uint8_t flags; - uint8_t reserved[10]; -} QEMU_PACKED; -typedef struct AcpiTableDmar AcpiTableDmar; - -/* Masks for Flags field above */ -#define ACPI_DMAR_INTR_REMAP 1 -#define ACPI_DMAR_X2APIC_OPT_OUT (1 << 1) - -/* Values for sub-structure type for DMAR */ -enum { - ACPI_DMAR_TYPE_HARDWARE_UNIT = 0, /* DRHD */ - ACPI_DMAR_TYPE_RESERVED_MEMORY = 1, /* RMRR */ - ACPI_DMAR_TYPE_ATSR = 2, /* ATSR */ - ACPI_DMAR_TYPE_HARDWARE_AFFINITY = 3, /* RHSR */ - ACPI_DMAR_TYPE_ANDD = 4, /* ANDD */ - ACPI_DMAR_TYPE_RESERVED = 5 /* Reserved for furture use */ -}; - -/* - * Sub-structures for DMAR - */ - -/* Device scope structure for DRHD. 
*/ -struct AcpiDmarDeviceScope { - uint8_t entry_type; - uint8_t length; - uint16_t reserved; - uint8_t enumeration_id; - uint8_t bus; - struct { - uint8_t device; - uint8_t function; - } path[]; -} QEMU_PACKED; -typedef struct AcpiDmarDeviceScope AcpiDmarDeviceScope; - -/* Type 0: Hardware Unit Definition */ -struct AcpiDmarHardwareUnit { - uint16_t type; - uint16_t length; - uint8_t flags; - uint8_t reserved; - uint16_t pci_segment; /* The PCI Segment associated with this unit */ - uint64_t address; /* Base address of remapping hardware register-set */ - AcpiDmarDeviceScope scope[]; -} QEMU_PACKED; -typedef struct AcpiDmarHardwareUnit AcpiDmarHardwareUnit; - -/* Type 2: Root Port ATS Capability Reporting Structure */ -struct AcpiDmarRootPortATS { - uint16_t type; - uint16_t length; - uint8_t flags; - uint8_t reserved; - uint16_t pci_segment; - AcpiDmarDeviceScope scope[]; -} QEMU_PACKED; -typedef struct AcpiDmarRootPortATS AcpiDmarRootPortATS; - -/* Masks for Flags field above */ -#define ACPI_DMAR_INCLUDE_PCI_ALL 1 -#define ACPI_DMAR_ATSR_ALL_PORTS 1 - -/* - * Input Output Remapping Table (IORT) - * Conforms to "IO Remapping Table System Software on ARM Platforms", - * Document number: ARM DEN 0049B, October 2015 - */ - -struct AcpiIortTable { - ACPI_TABLE_HEADER_DEF /* ACPI common table header */ - uint32_t node_count; - uint32_t node_offset; - uint32_t reserved; -} QEMU_PACKED; -typedef struct AcpiIortTable AcpiIortTable; - -/* - * IORT node types - */ - -#define ACPI_IORT_NODE_HEADER_DEF /* Node format common fields */ \ - uint8_t type; \ - uint16_t length; \ - uint8_t revision; \ - uint32_t reserved; \ - uint32_t mapping_count; \ - uint32_t mapping_offset; - -/* Values for node Type above */ -enum { - ACPI_IORT_NODE_ITS_GROUP = 0x00, - ACPI_IORT_NODE_NAMED_COMPONENT = 0x01, - ACPI_IORT_NODE_PCI_ROOT_COMPLEX = 0x02, - ACPI_IORT_NODE_SMMU = 0x03, - ACPI_IORT_NODE_SMMU_V3 = 0x04 -}; - -struct AcpiIortIdMapping { - uint32_t input_base; - uint32_t id_count; - uint32_t output_base; - uint32_t output_reference; - uint32_t flags; -} QEMU_PACKED; -typedef struct AcpiIortIdMapping AcpiIortIdMapping; - -struct AcpiIortMemoryAccess { - uint32_t cache_coherency; - uint8_t hints; - uint16_t reserved; - uint8_t memory_flags; -} QEMU_PACKED; -typedef struct AcpiIortMemoryAccess AcpiIortMemoryAccess; - -struct AcpiIortItsGroup { - ACPI_IORT_NODE_HEADER_DEF - uint32_t its_count; - uint32_t identifiers[]; -} QEMU_PACKED; -typedef struct AcpiIortItsGroup AcpiIortItsGroup; - -#define ACPI_IORT_SMMU_V3_COHACC_OVERRIDE 1 - -struct AcpiIortSmmu3 { - ACPI_IORT_NODE_HEADER_DEF - uint64_t base_address; - uint32_t flags; - uint32_t reserved2; - uint64_t vatos_address; - uint32_t model; - uint32_t event_gsiv; - uint32_t pri_gsiv; - uint32_t gerr_gsiv; - uint32_t sync_gsiv; - AcpiIortIdMapping id_mapping_array[]; -} QEMU_PACKED; -typedef struct AcpiIortSmmu3 AcpiIortSmmu3; - -struct AcpiIortRC { - ACPI_IORT_NODE_HEADER_DEF - AcpiIortMemoryAccess memory_properties; - uint32_t ats_attribute; - uint32_t pci_segment_number; - AcpiIortIdMapping id_mapping_array[]; -} QEMU_PACKED; -typedef struct AcpiIortRC AcpiIortRC; - #endif diff --git a/include/hw/acpi/acpi_dev_interface.h b/include/hw/acpi/acpi_dev_interface.h index 769ff55c7e..ea6056ab92 100644 --- a/include/hw/acpi/acpi_dev_interface.h +++ b/include/hw/acpi/acpi_dev_interface.h @@ -53,6 +53,7 @@ struct AcpiDeviceIfClass { void (*ospm_status)(AcpiDeviceIf *adev, ACPIOSTInfoList ***list); void (*send_event)(AcpiDeviceIf *adev, AcpiEventStatusBits ev); void 
(*madt_cpu)(AcpiDeviceIf *adev, int uid, - const CPUArchIdList *apic_ids, GArray *entry); + const CPUArchIdList *apic_ids, GArray *entry, + bool force_enabled); }; #endif diff --git a/include/hw/acpi/aml-build.h b/include/hw/acpi/aml-build.h index 471266d739..8346003a22 100644 --- a/include/hw/acpi/aml-build.h +++ b/include/hw/acpi/aml-build.h @@ -413,10 +413,37 @@ Aml *aml_concatenate(Aml *source1, Aml *source2, Aml *target); Aml *aml_object_type(Aml *object); void build_append_int_noprefix(GArray *table, uint64_t value, int size); -void -build_header(BIOSLinker *linker, GArray *table_data, - AcpiTableHeader *h, const char *sig, int len, uint8_t rev, - const char *oem_id, const char *oem_table_id); + +typedef struct AcpiTable { + const char *sig; + const uint8_t rev; + const char *oem_id; + const char *oem_table_id; + /* private vars tracking table state */ + GArray *array; + unsigned table_offset; +} AcpiTable; + +/** + * acpi_table_begin: + * initializes table header and keeps track of + * table data/offsets + * @desc: ACPI table descriptor + * @array: blob where the ACPI table will be composed/stored. + */ +void acpi_table_begin(AcpiTable *desc, GArray *array); + +/** + * acpi_table_end: + * sets actual table length and tells bios loader + * where table is for the later initialization on + * guest side. + * @linker: reference to BIOSLinker object to use for the table + * @table: ACPI table descriptor that was used with @acpi_table_begin + * counterpart + */ +void acpi_table_end(BIOSLinker *linker, AcpiTable *table); + void *acpi_data_push(GArray *table_data, unsigned size); unsigned acpi_data_len(GArray *table); void acpi_add_table(GArray *table_offsets, GArray *table_data); @@ -456,12 +483,15 @@ Aml *build_crs(PCIHostState *host, CrsRangeSet *range_set, uint32_t io_offset, uint32_t mmio32_offset, uint64_t mmio64_offset, uint16_t bus_nr_offset); -void build_srat_memory(AcpiSratMemoryAffinity *numamem, uint64_t base, +void build_srat_memory(GArray *table_data, uint64_t base, uint64_t len, int node, MemoryAffinityFlags flags); void build_slit(GArray *table_data, BIOSLinker *linker, MachineState *ms, const char *oem_id, const char *oem_table_id); +void build_pptt(GArray *table_data, BIOSLinker *linker, MachineState *ms, + const char *oem_id, const char *oem_table_id); + void build_fadt(GArray *tbl, BIOSLinker *linker, const AcpiFadtData *f, const char *oem_id, const char *oem_table_id); diff --git a/include/hw/acpi/ich9.h b/include/hw/acpi/ich9.h index a329ce43ab..f04f1791bd 100644 --- a/include/hw/acpi/ich9.h +++ b/include/hw/acpi/ich9.h @@ -29,7 +29,7 @@ #include "hw/acpi/acpi_dev_interface.h" #include "hw/acpi/tco.h" -#define ACPI_PCIHP_ADDR_ICH9 0x0cc4 +#define ACPI_PCIHP_ADDR_ICH9 0x0cc0 typedef struct ICH9LPCPMRegs { /* diff --git a/include/hw/adc/aspeed_adc.h b/include/hw/adc/aspeed_adc.h new file mode 100644 index 0000000000..2f166e8be1 --- /dev/null +++ b/include/hw/adc/aspeed_adc.h @@ -0,0 +1,55 @@ +/* + * Aspeed ADC + * + * Copyright 2017-2021 IBM Corp. 
+ * + * Andrew Jeffery + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#ifndef HW_ADC_ASPEED_ADC_H +#define HW_ADC_ASPEED_ADC_H + +#include "hw/sysbus.h" + +#define TYPE_ASPEED_ADC "aspeed.adc" +#define TYPE_ASPEED_2400_ADC TYPE_ASPEED_ADC "-ast2400" +#define TYPE_ASPEED_2500_ADC TYPE_ASPEED_ADC "-ast2500" +#define TYPE_ASPEED_2600_ADC TYPE_ASPEED_ADC "-ast2600" +OBJECT_DECLARE_TYPE(AspeedADCState, AspeedADCClass, ASPEED_ADC) + +#define TYPE_ASPEED_ADC_ENGINE "aspeed.adc.engine" +OBJECT_DECLARE_SIMPLE_TYPE(AspeedADCEngineState, ASPEED_ADC_ENGINE) + +#define ASPEED_ADC_NR_CHANNELS 16 +#define ASPEED_ADC_NR_REGS (0xD0 >> 2) + +struct AspeedADCEngineState { + /* */ + SysBusDevice parent; + + MemoryRegion mmio; + qemu_irq irq; + uint32_t engine_id; + uint32_t nr_channels; + uint32_t regs[ASPEED_ADC_NR_REGS]; +}; + +struct AspeedADCState { + /* */ + SysBusDevice parent; + + MemoryRegion mmio; + qemu_irq irq; + + AspeedADCEngineState engines[2]; +}; + +struct AspeedADCClass { + SysBusDeviceClass parent_class; + + uint32_t nr_engines; +}; + +#endif /* HW_ADC_ASPEED_ADC_H */ diff --git a/include/hw/arm/aspeed_soc.h b/include/hw/arm/aspeed_soc.h index 87d76c9259..8139358549 100644 --- a/include/hw/arm/aspeed_soc.h +++ b/include/hw/arm/aspeed_soc.h @@ -15,6 +15,7 @@ #include "hw/cpu/a15mpcore.h" #include "hw/intc/aspeed_vic.h" #include "hw/misc/aspeed_scu.h" +#include "hw/adc/aspeed_adc.h" #include "hw/misc/aspeed_sdmc.h" #include "hw/misc/aspeed_xdma.h" #include "hw/timer/aspeed_timer.h" @@ -53,6 +54,7 @@ struct AspeedSoCState { AspeedSCUState scu; AspeedHACEState hace; AspeedXDMAState xdma; + AspeedADCState adc; AspeedSMCState fmc; AspeedSMCState spi[ASPEED_SPIS_NUM]; EHCISysBusState ehci[ASPEED_EHCIS_NUM]; diff --git a/include/hw/arm/npcm7xx.h b/include/hw/arm/npcm7xx.h index 61ecc57ab9..ce593235d9 100644 --- a/include/hw/arm/npcm7xx.h +++ b/include/hw/arm/npcm7xx.h @@ -35,6 +35,7 @@ #include "hw/usb/hcd-ehci.h" #include "hw/usb/hcd-ohci.h" #include "target/arm/cpu.h" +#include "hw/sd/npcm7xx_sdhci.h" #define NPCM7XX_MAX_NUM_CPUS (2) @@ -103,6 +104,7 @@ typedef struct NPCM7xxState { OHCISysBusState ohci; NPCM7xxFIUState fiu[2]; NPCM7xxEMCState emc[2]; + NPCM7xxSDHCIState mmc; } NPCM7xxState; #define TYPE_NPCM7XX "npcm7xx" diff --git a/include/hw/arm/virt.h b/include/hw/arm/virt.h index b461b8d261..dc6b66ffc8 100644 --- a/include/hw/arm/virt.h +++ b/include/hw/arm/virt.h @@ -125,11 +125,13 @@ struct VirtMachineClass { bool claim_edge_triggered_timers; bool smbios_old_sys_ver; bool no_highmem_ecam; - bool no_ged; /* Machines < 4.2 has no support for ACPI GED device */ + bool no_ged; /* Machines < 4.2 have no support for ACPI GED device */ bool kvm_no_adjvtime; bool no_kvm_steal_time; bool acpi_expose_flash; bool no_secure_gpio; + /* Machines < 6.2 have no support for describing cpu topology to guest */ + bool no_cpu_topology; }; struct VirtMachineState { diff --git a/include/hw/arm/xlnx-versal.h b/include/hw/arm/xlnx-versal.h index 9b79051747..895ba12c61 100644 --- a/include/hw/arm/xlnx-versal.h +++ b/include/hw/arm/xlnx-versal.h @@ -24,6 +24,8 @@ #include "qom/object.h" #include "hw/usb/xlnx-usb-subsystem.h" #include "hw/misc/xlnx-versal-xramc.h" +#include "hw/nvram/xlnx-bbram.h" +#include "hw/nvram/xlnx-versal-efuse.h" #define TYPE_XLNX_VERSAL "xlnx-versal" OBJECT_DECLARE_SIMPLE_TYPE(Versal, XLNX_VERSAL) @@ -79,6 +81,10 @@ struct Versal { } iou; XlnxZynqMPRTC rtc; + XlnxBBRam bbram; + XlnxEFuse efuse; + XlnxVersalEFuseCtrl efuse_ctrl; + XlnxVersalEFuseCache efuse_cache; } pmc; struct { 
@@ -105,8 +111,10 @@ struct Versal { #define VERSAL_GEM1_WAKE_IRQ_0 59 #define VERSAL_ADMA_IRQ_0 60 #define VERSAL_XRAM_IRQ_0 79 +#define VERSAL_BBRAM_APB_IRQ_0 121 #define VERSAL_RTC_APB_ERR_IRQ 121 #define VERSAL_SD0_IRQ_0 126 +#define VERSAL_EFUSE_IRQ 139 #define VERSAL_RTC_ALARM_IRQ 142 #define VERSAL_RTC_SECONDS_IRQ 143 @@ -172,6 +180,13 @@ struct Versal { #define MM_PMC_SD0 0xf1040000U #define MM_PMC_SD0_SIZE 0x10000 +#define MM_PMC_BBRAM_CTRL 0xf11f0000 +#define MM_PMC_BBRAM_CTRL_SIZE 0x00050 +#define MM_PMC_EFUSE_CTRL 0xf1240000 +#define MM_PMC_EFUSE_CTRL_SIZE 0x00104 +#define MM_PMC_EFUSE_CACHE 0xf1250000 +#define MM_PMC_EFUSE_CACHE_SIZE 0x00C00 + #define MM_PMC_CRP 0xf1260000U #define MM_PMC_CRP_SIZE 0x10000 #define MM_PMC_RTC 0xf12a0000 diff --git a/include/hw/arm/xlnx-zynqmp.h b/include/hw/arm/xlnx-zynqmp.h index c84fe15996..062e637fe4 100644 --- a/include/hw/arm/xlnx-zynqmp.h +++ b/include/hw/arm/xlnx-zynqmp.h @@ -36,6 +36,8 @@ #include "qom/object.h" #include "net/can_emu.h" #include "hw/dma/xlnx_csu_dma.h" +#include "hw/nvram/xlnx-bbram.h" +#include "hw/nvram/xlnx-zynqmp-efuse.h" #define TYPE_XLNX_ZYNQMP "xlnx-zynqmp" OBJECT_DECLARE_SIMPLE_TYPE(XlnxZynqMPState, XLNX_ZYNQMP) @@ -100,6 +102,9 @@ struct XlnxZynqMPState { MemoryRegion *ddr_ram; MemoryRegion ddr_ram_low, ddr_ram_high; + XlnxBBRam bbram; + XlnxEFuse efuse; + XlnxZynqMPEFuse efuse_ctrl; MemoryRegion mr_unimp[XLNX_ZYNQMP_NUM_UNIMP_AREAS]; diff --git a/include/hw/boards.h b/include/hw/boards.h index 463a5514f9..9c1c190104 100644 --- a/include/hw/boards.h +++ b/include/hw/boards.h @@ -34,6 +34,7 @@ HotpluggableCPUList *machine_query_hotpluggable_cpus(MachineState *machine); void machine_set_cpu_numa_node(MachineState *machine, const CpuInstanceProperties *props, Error **errp); +void smp_parse(MachineState *ms, SMPConfiguration *config, Error **errp); /** * machine_class_allow_dynamic_sysbus_dev: Add type to list of valid devices @@ -51,6 +52,21 @@ void machine_set_cpu_numa_node(MachineState *machine, */ void machine_class_allow_dynamic_sysbus_dev(MachineClass *mc, const char *type); +/** + * device_type_is_dynamic_sysbus: Check if type is an allowed sysbus device + * type for the machine class. + * @mc: Machine class + * @type: type to check (should be a subtype of TYPE_SYS_BUS_DEVICE) + * + * Returns: true if @type is a type in the machine's list of + * dynamically pluggable sysbus devices; otherwise false. + * + * Check if the QOM type @type is in the list of allowed sysbus device + * types (see machine_class_allowed_dynamic_sysbus_dev()). + * Note that if @type has a parent type in the list, it is allowed too. + */ +bool device_type_is_dynamic_sysbus(MachineClass *mc, const char *type); + /** * device_is_dynamic_sysbus: test whether device is a dynamic sysbus device * @mc: Machine class @@ -108,6 +124,16 @@ typedef struct { CPUArchId cpus[]; } CPUArchIdList; +/** + * SMPCompatProps: + * @prefer_sockets - whether sockets are preferred over cores in smp parsing + * @dies_supported - whether dies are supported by the machine + */ +typedef struct { + bool prefer_sockets; + bool dies_supported; +} SMPCompatProps; + /** * MachineClass: * @deprecation_reason: If set, the machine is marked as deprecated. The @@ -169,10 +195,6 @@ typedef struct { * kvm-type may be NULL if it is not needed. 
* @numa_mem_supported: * true if '--numa node.mem' option is supported and false otherwise - * @smp_parse: - * The function pointer to hook different machine specific functions for - * parsing "smp-opts" from QemuOpts to MachineState::CpuTopology and more - * machine specific topology fields, such as smp_dies for PCMachine. * @hotplug_allowed: * If the hook is provided, then it'll be called for each device * hotplug to check whether the device hotplug is allowed. Return @@ -209,7 +231,6 @@ struct MachineClass { void (*reset)(MachineState *state); void (*wakeup)(MachineState *state); int (*kvm_type)(MachineState *machine, const char *arg); - void (*smp_parse)(MachineState *ms, SMPConfiguration *config, Error **errp); BlockInterfaceType block_default_type; int units_per_default_bus; @@ -247,6 +268,7 @@ struct MachineClass { bool nvdimm_supported; bool numa_mem_supported; bool auto_enable_numa; + SMPCompatProps smp_props; const char *default_ram_id; HotplugHandler *(*get_hotplug_handler)(MachineState *machine, @@ -274,17 +296,18 @@ typedef struct DeviceMemoryState { /** * CpuTopology: * @cpus: the number of present logical processors on the machine - * @cores: the number of cores in one package - * @threads: the number of threads in one core * @sockets: the number of sockets on the machine + * @dies: the number of dies in one socket + * @cores: the number of cores in one die + * @threads: the number of threads in one core * @max_cpus: the maximum number of logical processors on the machine */ typedef struct CpuTopology { unsigned int cpus; + unsigned int sockets; unsigned int dies; unsigned int cores; unsigned int threads; - unsigned int sockets; unsigned int max_cpus; } CpuTopology; @@ -294,7 +317,6 @@ typedef struct CpuTopology { struct MachineState { /*< private >*/ Object parent_obj; - Notifier sysbus_notifier; /*< public >*/ diff --git a/include/hw/char/goldfish_tty.h b/include/hw/char/goldfish_tty.h index b9dd67362a..7503d2fa1e 100644 --- a/include/hw/char/goldfish_tty.h +++ b/include/hw/char/goldfish_tty.h @@ -1,5 +1,5 @@ /* - * SPDX-License-Identifer: GPL-2.0-or-later + * SPDX-License-Identifier: GPL-2.0-or-later * * Goldfish TTY * diff --git a/include/hw/char/mchp_pfsoc_mmuart.h b/include/hw/char/mchp_pfsoc_mmuart.h index f61990215f..b0e14ca355 100644 --- a/include/hw/char/mchp_pfsoc_mmuart.h +++ b/include/hw/char/mchp_pfsoc_mmuart.h @@ -28,18 +28,25 @@ #ifndef HW_MCHP_PFSOC_MMUART_H #define HW_MCHP_PFSOC_MMUART_H +#include "hw/sysbus.h" #include "hw/char/serial.h" -#define MCHP_PFSOC_MMUART_REG_SIZE 52 +#define MCHP_PFSOC_MMUART_REG_COUNT 13 + +#define TYPE_MCHP_PFSOC_UART "mchp.pfsoc.uart" +OBJECT_DECLARE_SIMPLE_TYPE(MchpPfSoCMMUartState, MCHP_PFSOC_UART) typedef struct MchpPfSoCMMUartState { + /*< private >*/ + SysBusDevice parent_obj; + + /*< public >*/ + MemoryRegion container; MemoryRegion iomem; - hwaddr base; - qemu_irq irq; - SerialMM *serial; + SerialMM serial_mm; - uint32_t reg[MCHP_PFSOC_MMUART_REG_SIZE / sizeof(uint32_t)]; + uint32_t reg[MCHP_PFSOC_MMUART_REG_COUNT]; } MchpPfSoCMMUartState; /** diff --git a/include/hw/clock.h b/include/hw/clock.h index 11f67fb970..5c927cee7f 100644 --- a/include/hw/clock.h +++ b/include/hw/clock.h @@ -323,10 +323,7 @@ static inline uint64_t clock_ns_to_ticks(const Clock *clk, uint64_t ns) if (clk->period == 0) { return 0; } - /* - * Ignore divu128() return value as we've caught div-by-zero and don't - * need different behaviour for overflow. 
- */ + divu128(&lo, &hi, clk->period); return lo; } diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h index bc864564ce..e948e81f1a 100644 --- a/include/hw/core/cpu.h +++ b/include/hw/core/cpu.h @@ -131,7 +131,6 @@ struct CPUClass { ObjectClass *(*class_by_name)(const char *cpu_model); void (*parse_features)(const char *typename, char *str, Error **errp); - int reset_dump_flags; bool (*has_work)(CPUState *cpu); int (*memory_rw_debug)(CPUState *cpu, vaddr addr, uint8_t *buf, int len, bool is_write); @@ -149,9 +148,6 @@ struct CPUClass { void (*disas_set_info)(CPUState *cpu, disassemble_info *info); const char *deprecation_note; - /* Keep non-pointer data at the end to minimize holes. */ - int gdb_num_core_regs; - bool gdb_stop_before_watchpoint; struct AccelCPUClass *accel_cpu; /* when system emulation is not available, this pointer is NULL */ @@ -165,6 +161,13 @@ struct CPUClass { * class data that depends on the accelerator, see accel/accel-common.c. */ void (*init_accel_cpu)(struct AccelCPUClass *accel_cpu, CPUClass *cc); + + /* + * Keep non-pointer data at the end to minimize holes. + */ + int reset_dump_flags; + int gdb_num_core_regs; + bool gdb_stop_before_watchpoint; }; /* @@ -378,6 +381,7 @@ struct CPUState { struct kvm_run *kvm_run; struct kvm_dirty_gfn *kvm_dirty_gfns; uint32_t kvm_fetch_index; + uint64_t dirty_pages; /* Used for events with 'vcpu' and *without* the 'disabled' properties */ DECLARE_BITMAP(trace_dstate_delayed, CPU_TRACE_DSTATE_MAX_EVENTS); @@ -1005,6 +1009,7 @@ void QEMU_NORETURN cpu_abort(CPUState *cpu, const char *fmt, ...) GCC_FMT_ATTR(2, 3); /* $(top_srcdir)/cpu.c */ +void cpu_class_init_props(DeviceClass *dc); void cpu_exec_initfn(CPUState *cpu); void cpu_exec_realizefn(CPUState *cpu, Error **errp); void cpu_exec_unrealizefn(CPUState *cpu); diff --git a/include/hw/core/tcg-cpu-ops.h b/include/hw/core/tcg-cpu-ops.h index 6cbe17f2e6..e13898553a 100644 --- a/include/hw/core/tcg-cpu-ops.h +++ b/include/hw/core/tcg-cpu-ops.h @@ -35,18 +35,6 @@ struct TCGCPUOps { void (*cpu_exec_enter)(CPUState *cpu); /** @cpu_exec_exit: Callback for cpu_exec cleanup */ void (*cpu_exec_exit)(CPUState *cpu); - /** - * @tlb_fill: Handle a softmmu tlb miss or user-only address fault - * - * For system mode, if the access is valid, call tlb_set_page - * and return true; if the access is invalid, and probe is - * true, return false; otherwise raise an exception and do - * not return. For user-only mode, always raise an exception - * and do not return. - */ - bool (*tlb_fill)(CPUState *cpu, vaddr address, int size, - MMUAccessType access_type, int mmu_idx, - bool probe, uintptr_t retaddr); /** @debug_excp_handler: Callback for handling debug exceptions */ void (*debug_excp_handler)(CPUState *cpu); @@ -68,6 +56,16 @@ struct TCGCPUOps { #ifdef CONFIG_SOFTMMU /** @cpu_exec_interrupt: Callback for processing interrupts in cpu_exec */ bool (*cpu_exec_interrupt)(CPUState *cpu, int interrupt_request); + /** + * @tlb_fill: Handle a softmmu tlb miss + * + * If the access is valid, call tlb_set_page and return true; + * if the access is invalid and probe is true, return false; + * otherwise raise an exception and do not return. 
+ */ + bool (*tlb_fill)(CPUState *cpu, vaddr address, int size, + MMUAccessType access_type, int mmu_idx, + bool probe, uintptr_t retaddr); /** * @do_transaction_failed: Callback for handling failed memory transactions * (ie bus faults or external aborts; not MMU faults) @@ -111,6 +109,55 @@ struct TCGCPUOps { */ bool (*io_recompile_replay_branch)(CPUState *cpu, const TranslationBlock *tb); +#else + /** + * record_sigsegv: + * @cpu: cpu context + * @addr: faulting guest address + * @access_type: access was read/write/execute + * @maperr: true for invalid page, false for permission fault + * @ra: host pc for unwinding + * + * We are about to raise SIGSEGV with si_code set for @maperr, + * and si_addr set for @addr. Record anything further needed + * for the signal ucontext_t. + * + * If the emulated kernel does not provide anything to the signal + * handler with anything besides the user context registers, and + * the siginfo_t, then this hook need do nothing and may be omitted. + * Otherwise, record the data and return; the caller will raise + * the signal, unwind the cpu state, and return to the main loop. + * + * If it is simpler to re-use the sysemu tlb_fill code, @ra is provided + * so that a "normal" cpu exception can be raised. In this case, + * the signal must be raised by the architecture cpu_loop. + */ + void (*record_sigsegv)(CPUState *cpu, vaddr addr, + MMUAccessType access_type, + bool maperr, uintptr_t ra); + /** + * record_sigbus: + * @cpu: cpu context + * @addr: misaligned guest address + * @access_type: access was read/write/execute + * @ra: host pc for unwinding + * + * We are about to raise SIGBUS with si_code BUS_ADRALN, + * and si_addr set for @addr. Record anything further needed + * for the signal ucontext_t. + * + * If the emulated kernel does not provide the signal handler with + * anything besides the user context registers, and the siginfo_t, + * then this hook need do nothing and may be omitted. + * Otherwise, record the data and return; the caller will raise + * the signal, unwind the cpu state, and return to the main loop. + * + * If it is simpler to re-use the sysemu do_unaligned_access code, + * @ra is provided so that a "normal" cpu exception can be raised. + * In this case, the signal must be raised by the architecture cpu_loop. 
+ */ + void (*record_sigbus)(CPUState *cpu, vaddr addr, + MMUAccessType access_type, uintptr_t ra); #endif /* CONFIG_SOFTMMU */ #endif /* NEED_CPU_H */ diff --git a/include/hw/display/macfb.h b/include/hw/display/macfb.h index 80806b0306..e52775aa21 100644 --- a/include/hw/display/macfb.h +++ b/include/hw/display/macfb.h @@ -14,9 +14,42 @@ #define MACFB_H #include "exec/memory.h" +#include "hw/irq.h" #include "ui/console.h" +#include "qemu/timer.h" #include "qom/object.h" +typedef enum { + MACFB_DISPLAY_APPLE_21_COLOR = 0, + MACFB_DISPLAY_APPLE_PORTRAIT = 1, + MACFB_DISPLAY_APPLE_12_RGB = 2, + MACFB_DISPLAY_APPLE_2PAGE_MONO = 3, + MACFB_DISPLAY_NTSC_UNDERSCAN = 4, + MACFB_DISPLAY_NTSC_OVERSCAN = 5, + MACFB_DISPLAY_APPLE_12_MONO = 6, + MACFB_DISPLAY_APPLE_13_RGB = 7, + MACFB_DISPLAY_16_COLOR = 8, + MACFB_DISPLAY_PAL1_UNDERSCAN = 9, + MACFB_DISPLAY_PAL1_OVERSCAN = 10, + MACFB_DISPLAY_PAL2_UNDERSCAN = 11, + MACFB_DISPLAY_PAL2_OVERSCAN = 12, + MACFB_DISPLAY_VGA = 13, + MACFB_DISPLAY_SVGA = 14, +} MacfbDisplayType; + +typedef struct MacFbMode { + uint8_t type; + uint8_t depth; + uint32_t mode_ctrl1; + uint32_t mode_ctrl2; + uint32_t width; + uint32_t height; + uint32_t stride; + uint32_t offset; +} MacFbMode; + +#define MACFB_NUM_REGS 8 + typedef struct MacfbState { MemoryRegion mem_vram; MemoryRegion mem_ctrl; @@ -28,6 +61,15 @@ typedef struct MacfbState { uint8_t color_palette[256 * 3]; uint32_t width, height; /* in pixels */ uint8_t depth; + uint8_t type; + + uint32_t regs[MACFB_NUM_REGS]; + MacFbMode *mode; + + uint32_t irq_state; + uint32_t irq_mask; + QEMUTimer *vbl_timer; + qemu_irq irq; } MacfbState; #define TYPE_MACFB "sysbus-macfb" @@ -46,6 +88,7 @@ struct MacfbNubusDeviceClass { DeviceClass parent_class; DeviceRealize parent_realize; + DeviceUnrealize parent_unrealize; }; diff --git a/include/hw/elf_ops.h b/include/hw/elf_ops.h index 1c37cec4ae..995de8495c 100644 --- a/include/hw/elf_ops.h +++ b/include/hw/elf_ops.h @@ -312,25 +312,26 @@ static struct elf_note *glue(get_elf_note_type, SZ)(struct elf_note *nhdr, return nhdr; } -static int glue(load_elf, SZ)(const char *name, int fd, - uint64_t (*elf_note_fn)(void *, void *, bool), - uint64_t (*translate_fn)(void *, uint64_t), - void *translate_opaque, - int must_swab, uint64_t *pentry, - uint64_t *lowaddr, uint64_t *highaddr, - uint32_t *pflags, int elf_machine, - int clear_lsb, int data_swab, - AddressSpace *as, bool load_rom, - symbol_fn_t sym_cb) +static ssize_t glue(load_elf, SZ)(const char *name, int fd, + uint64_t (*elf_note_fn)(void *, void *, bool), + uint64_t (*translate_fn)(void *, uint64_t), + void *translate_opaque, + int must_swab, uint64_t *pentry, + uint64_t *lowaddr, uint64_t *highaddr, + uint32_t *pflags, int elf_machine, + int clear_lsb, int data_swab, + AddressSpace *as, bool load_rom, + symbol_fn_t sym_cb) { struct elfhdr ehdr; struct elf_phdr *phdr = NULL, *ph; - int size, i, total_size; + int size, i; + ssize_t total_size; elf_word mem_size, file_size, data_offset; uint64_t addr, low = (uint64_t)-1, high = 0; GMappedFile *mapped_file = NULL; uint8_t *data = NULL; - int ret = ELF_LOAD_FAILED; + ssize_t ret = ELF_LOAD_FAILED; if (read(fd, &ehdr, sizeof(ehdr)) != sizeof(ehdr)) goto fail; @@ -482,7 +483,7 @@ static int glue(load_elf, SZ)(const char *name, int fd, } } - if (mem_size > INT_MAX - total_size) { + if (mem_size > SSIZE_MAX - total_size) { ret = ELF_LOAD_TOO_BIG; goto fail; } diff --git a/include/hw/gpio/aspeed_gpio.h b/include/hw/gpio/aspeed_gpio.h index e1636ce7fe..801846befb 100644 --- 
a/include/hw/gpio/aspeed_gpio.h +++ b/include/hw/gpio/aspeed_gpio.h @@ -17,9 +17,9 @@ OBJECT_DECLARE_TYPE(AspeedGPIOState, AspeedGPIOClass, ASPEED_GPIO) #define ASPEED_GPIO_MAX_NR_SETS 8 +#define ASPEED_GPIOS_PER_SET 32 #define ASPEED_REGS_PER_BANK 14 #define ASPEED_GPIO_MAX_NR_REGS (ASPEED_REGS_PER_BANK * ASPEED_GPIO_MAX_NR_SETS) -#define ASPEED_GPIO_NR_PINS 228 #define ASPEED_GROUPS_PER_SET 4 #define ASPEED_GPIO_NR_DEBOUNCE_REGS 3 #define ASPEED_CHARS_PER_GROUP_LABEL 4 @@ -60,7 +60,6 @@ struct AspeedGPIOClass { const GPIOSetProperties *props; uint32_t nr_gpio_pins; uint32_t nr_gpio_sets; - uint32_t gap; const AspeedGPIOReg *reg_table; }; @@ -72,7 +71,7 @@ struct AspeedGPIOState { MemoryRegion iomem; int pending; qemu_irq irq; - qemu_irq gpios[ASPEED_GPIO_NR_PINS]; + qemu_irq gpios[ASPEED_GPIO_MAX_NR_SETS][ASPEED_GPIOS_PER_SET]; /* Parallel GPIO Registers */ uint32_t debounce_regs[ASPEED_GPIO_NR_DEBOUNCE_REGS]; diff --git a/include/hw/i2c/aspeed_i2c.h b/include/hw/i2c/aspeed_i2c.h index 565f833066..4b9be09274 100644 --- a/include/hw/i2c/aspeed_i2c.h +++ b/include/hw/i2c/aspeed_i2c.h @@ -36,7 +36,11 @@ OBJECT_DECLARE_TYPE(AspeedI2CState, AspeedI2CClass, ASPEED_I2C) struct AspeedI2CState; -typedef struct AspeedI2CBus { +#define TYPE_ASPEED_I2C_BUS "aspeed.i2c.bus" +OBJECT_DECLARE_SIMPLE_TYPE(AspeedI2CBus, ASPEED_I2C_BUS) +struct AspeedI2CBus { + SysBusDevice parent_obj; + struct AspeedI2CState *controller; MemoryRegion mr; @@ -54,7 +58,7 @@ typedef struct AspeedI2CBus { uint32_t pool_ctrl; uint32_t dma_addr; uint32_t dma_len; -} AspeedI2CBus; +}; struct AspeedI2CState { SysBusDevice parent_obj; diff --git a/include/hw/i386/hostmem-epc.h b/include/hw/i386/hostmem-epc.h new file mode 100644 index 0000000000..846c726085 --- /dev/null +++ b/include/hw/i386/hostmem-epc.h @@ -0,0 +1,28 @@ +/* + * SGX EPC backend + * + * Copyright (C) 2019 Intel Corporation + * + * Authors: + * Sean Christopherson + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
+ */ +#ifndef QEMU_HOSTMEM_EPC_H +#define QEMU_HOSTMEM_EPC_H + +#include "sysemu/hostmem.h" + +#define TYPE_MEMORY_BACKEND_EPC "memory-backend-epc" + +#define MEMORY_BACKEND_EPC(obj) \ + OBJECT_CHECK(HostMemoryBackendEpc, (obj), TYPE_MEMORY_BACKEND_EPC) + +typedef struct HostMemoryBackendEpc HostMemoryBackendEpc; + +struct HostMemoryBackendEpc { + HostMemoryBackend parent_obj; +}; + +#endif diff --git a/include/hw/i386/microvm.h b/include/hw/i386/microvm.h index f25f837441..4d9c732d4b 100644 --- a/include/hw/i386/microvm.h +++ b/include/hw/i386/microvm.h @@ -104,6 +104,10 @@ struct MicrovmMachineState { Notifier machine_done; Notifier powerdown_req; struct GPEXConfig gpex; + + /* device tree */ + void *fdt; + uint32_t ioapic_phandle[2]; }; #define TYPE_MICROVM_MACHINE MACHINE_TYPE_NAME("microvm") diff --git a/include/hw/i386/pc.h b/include/hw/i386/pc.h index 4d2e35a152..9ab39e428f 100644 --- a/include/hw/i386/pc.h +++ b/include/hw/i386/pc.h @@ -12,6 +12,7 @@ #include "hw/acpi/acpi_dev_interface.h" #include "hw/hotplug.h" #include "qom/object.h" +#include "hw/i386/sgx-epc.h" #define HPET_INTCAP "hpet-intcap" @@ -34,6 +35,7 @@ typedef struct PCMachineState { I2CBus *smbus; PFlashCFI01 *flash[2]; ISADevice *pcspk; + DeviceState *iommu; /* Configuration options: */ uint64_t max_ram_below_4g; @@ -49,6 +51,8 @@ typedef struct PCMachineState { /* ACPI Memory hotplug IO base address */ hwaddr memhp_io_base; + + SGXEPCState sgx_epc; } PCMachineState; #define PC_MACHINE_ACPI_DEVICE_PROP "acpi-device" @@ -113,9 +117,6 @@ struct PCMachineClass { /* generate legacy CPU hotplug AML */ bool legacy_cpu_hotplug; - /* use DMA capable linuxboot option rom */ - bool linuxboot_dma_enabled; - /* use PVH to load kernels that support this feature */ bool pvh_enabled; @@ -187,10 +188,13 @@ bool pc_system_ovmf_table_find(const char *entry, uint8_t **data, int *data_len); void pc_system_parse_ovmf_flash(uint8_t *flash_ptr, size_t flash_size); - -/* acpi-build.c */ +/* hw/i386/acpi-common.c */ void pc_madt_cpu_entry(AcpiDeviceIf *adev, int uid, - const CPUArchIdList *apic_ids, GArray *entry); + const CPUArchIdList *apic_ids, GArray *entry, + bool force_enabled); + +/* sgx.c */ +void pc_machine_init_sgx_epc(PCMachineState *pcms); extern GlobalProperty pc_compat_6_1[]; extern const size_t pc_compat_6_1_len; diff --git a/include/hw/i386/sgx-epc.h b/include/hw/i386/sgx-epc.h new file mode 100644 index 0000000000..a6a65be854 --- /dev/null +++ b/include/hw/i386/sgx-epc.h @@ -0,0 +1,67 @@ +/* + * SGX EPC device + * + * Copyright (C) 2019 Intel Corporation + * + * Authors: + * Sean Christopherson + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ +#ifndef QEMU_SGX_EPC_H +#define QEMU_SGX_EPC_H + +#include "hw/i386/hostmem-epc.h" + +#define TYPE_SGX_EPC "sgx-epc" +#define SGX_EPC(obj) \ + OBJECT_CHECK(SGXEPCDevice, (obj), TYPE_SGX_EPC) +#define SGX_EPC_CLASS(oc) \ + OBJECT_CLASS_CHECK(SGXEPCDeviceClass, (oc), TYPE_SGX_EPC) +#define SGX_EPC_GET_CLASS(obj) \ + OBJECT_GET_CLASS(SGXEPCDeviceClass, (obj), TYPE_SGX_EPC) + +#define SGX_EPC_ADDR_PROP "addr" +#define SGX_EPC_SIZE_PROP "size" +#define SGX_EPC_MEMDEV_PROP "memdev" + +/** + * SGXEPCDevice: + * @addr: starting guest physical address, where @SGXEPCDevice is mapped. + * Default value: 0, means that address is auto-allocated. 
+ * @hostmem: host memory backend providing memory for @SGXEPCDevice + */ +typedef struct SGXEPCDevice { + /* private */ + DeviceState parent_obj; + + /* public */ + uint64_t addr; + HostMemoryBackendEpc *hostmem; +} SGXEPCDevice; + +/* + * @base: address in guest physical address space where EPC regions start + * @mr: address space container for memory devices + */ +typedef struct SGXEPCState { + uint64_t base; + uint64_t size; + + MemoryRegion mr; + + struct SGXEPCDevice **sections; + int nr_sections; +} SGXEPCState; + +bool sgx_epc_get_section(int section_nr, uint64_t *addr, uint64_t *size); + +static inline uint64_t sgx_epc_above_4g_end(SGXEPCState *sgx_epc) +{ + assert(sgx_epc != NULL && sgx_epc->base >= 0x100000000ULL); + + return sgx_epc->base + sgx_epc->size; +} + +#endif diff --git a/include/hw/i386/x86-iommu.h b/include/hw/i386/x86-iommu.h index 9de92d33a1..5ba0c056d6 100644 --- a/include/hw/i386/x86-iommu.h +++ b/include/hw/i386/x86-iommu.h @@ -33,12 +33,6 @@ OBJECT_DECLARE_TYPE(X86IOMMUState, X86IOMMUClass, X86_IOMMU_DEVICE) typedef struct X86IOMMUIrq X86IOMMUIrq; typedef struct X86IOMMU_MSIMessage X86IOMMU_MSIMessage; -typedef enum IommuType { - TYPE_INTEL, - TYPE_AMD, - TYPE_NONE -} IommuType; - struct X86IOMMUClass { SysBusDeviceClass parent; /* Intel/AMD specific realize() hook */ @@ -71,7 +65,6 @@ struct X86IOMMUState { OnOffAuto intr_supported; /* Whether vIOMMU supports IR */ bool dt_supported; /* Whether vIOMMU supports DT */ bool pt_supported; /* Whether vIOMMU supports pass-through */ - IommuType type; /* IOMMU type - AMD/Intel */ QLIST_HEAD(, IEC_Notifier) iec_notifiers; /* IEC notify list */ }; @@ -140,11 +133,6 @@ struct X86IOMMU_MSIMessage { */ X86IOMMUState *x86_iommu_get_default(void); -/* - * x86_iommu_get_type - get IOMMU type - */ -IommuType x86_iommu_get_type(void); - /** * x86_iommu_iec_register_notifier - register IEC (Interrupt Entry * Cache) notifiers diff --git a/include/hw/i386/x86.h b/include/hw/i386/x86.h index 6e9244a82c..bb1cfb8896 100644 --- a/include/hw/i386/x86.h +++ b/include/hw/i386/x86.h @@ -38,6 +38,8 @@ struct X86MachineClass { bool save_tsc_khz; /* Enables contiguous-apic-ID mode */ bool compat_apic_id_mode; + /* use DMA capable linuxboot option rom */ + bool fwcfg_dma_enabled; }; struct X86MachineState { @@ -62,6 +64,7 @@ struct X86MachineState { unsigned pci_irq_mask; unsigned apic_id_limit; uint16_t boot_cpus; + SgxEPCList *sgx_epc_list; OnOffAuto smm; OnOffAuto acpi; @@ -119,8 +122,7 @@ void x86_bios_rom_init(MachineState *ms, const char *default_firmware, void x86_load_linux(X86MachineState *x86ms, FWCfgState *fw_cfg, int acpi_data_size, - bool pvh_enabled, - bool linuxboot_dma_enabled); + bool pvh_enabled); bool x86_machine_is_smm_enabled(const X86MachineState *x86ms); bool x86_machine_is_acpi_enabled(const X86MachineState *x86ms); diff --git a/include/hw/ide/internal.h b/include/hw/ide/internal.h index 79172217cc..97e7e59dc5 100644 --- a/include/hw/ide/internal.h +++ b/include/hw/ide/internal.h @@ -648,8 +648,8 @@ void ide_atapi_cmd(IDEState *s); void ide_atapi_cmd_reply_end(IDEState *s); /* hw/ide/qdev.c */ -void ide_bus_new(IDEBus *idebus, size_t idebus_size, DeviceState *dev, - int bus_id, int max_units); +void ide_bus_init(IDEBus *idebus, size_t idebus_size, DeviceState *dev, + int bus_id, int max_units); IDEDevice *ide_create_drive(IDEBus *bus, int unit, DriveInfo *drive); int ide_handle_rw_error(IDEState *s, int error, int op); diff --git a/include/hw/intc/goldfish_pic.h b/include/hw/intc/goldfish_pic.h index 
ad13ab37fc..e9d552f796 100644 --- a/include/hw/intc/goldfish_pic.h +++ b/include/hw/intc/goldfish_pic.h @@ -1,5 +1,5 @@ /* - * SPDX-License-Identifer: GPL-2.0-or-later + * SPDX-License-Identifier: GPL-2.0-or-later * * Goldfish PIC * diff --git a/include/hw/intc/m68k_irqc.h b/include/hw/intc/m68k_irqc.h index dbcfcfc2e0..ef91f21812 100644 --- a/include/hw/intc/m68k_irqc.h +++ b/include/hw/intc/m68k_irqc.h @@ -1,5 +1,5 @@ /* - * SPDX-License-Identifer: GPL-2.0-or-later + * SPDX-License-Identifier: GPL-2.0-or-later * * QEMU Motorola 680x0 IRQ Controller * diff --git a/include/hw/ipack/ipack.h b/include/hw/ipack/ipack.h index 75014e74ae..cbcdda509d 100644 --- a/include/hw/ipack/ipack.h +++ b/include/hw/ipack/ipack.h @@ -73,9 +73,9 @@ extern const VMStateDescription vmstate_ipack_device; VMSTATE_STRUCT(_field, _state, 1, vmstate_ipack_device, IPackDevice) IPackDevice *ipack_device_find(IPackBus *bus, int32_t slot); -void ipack_bus_new_inplace(IPackBus *bus, size_t bus_size, - DeviceState *parent, - const char *name, uint8_t n_slots, - qemu_irq_handler handler); +void ipack_bus_init(IPackBus *bus, size_t bus_size, + DeviceState *parent, + uint8_t n_slots, + qemu_irq_handler handler); #endif diff --git a/include/hw/isa/vt82c686.h b/include/hw/isa/vt82c686.h index 0f01aaa471..56ac141be3 100644 --- a/include/hw/isa/vt82c686.h +++ b/include/hw/isa/vt82c686.h @@ -1,6 +1,8 @@ #ifndef HW_VT82C686_H #define HW_VT82C686_H +#include "hw/pci/pci.h" + #define TYPE_VT82C686B_ISA "vt82c686b-isa" #define TYPE_VT82C686B_PM "vt82c686b-pm" #define TYPE_VT8231_ISA "vt8231-isa" @@ -8,4 +10,6 @@ #define TYPE_VIA_AC97 "via-ac97" #define TYPE_VIA_MC97 "via-mc97" +void via_isa_set_irq(PCIDevice *d, int n, int level); + #endif diff --git a/include/hw/loader.h b/include/hw/loader.h index 81104cb02f..4fa485bd61 100644 --- a/include/hw/loader.h +++ b/include/hw/loader.h @@ -90,7 +90,7 @@ int load_image_gzipped(const char *filename, hwaddr addr, uint64_t max_sz); #define ELF_LOAD_WRONG_ARCH -3 #define ELF_LOAD_WRONG_ENDIAN -4 #define ELF_LOAD_TOO_BIG -5 -const char *load_elf_strerror(int error); +const char *load_elf_strerror(ssize_t error); /** load_elf_ram_sym: * @filename: Path of ELF file @@ -128,48 +128,48 @@ const char *load_elf_strerror(int error); typedef void (*symbol_fn_t)(const char *st_name, int st_info, uint64_t st_value, uint64_t st_size); -int load_elf_ram_sym(const char *filename, - uint64_t (*elf_note_fn)(void *, void *, bool), - uint64_t (*translate_fn)(void *, uint64_t), - void *translate_opaque, uint64_t *pentry, - uint64_t *lowaddr, uint64_t *highaddr, uint32_t *pflags, - int big_endian, int elf_machine, - int clear_lsb, int data_swab, - AddressSpace *as, bool load_rom, symbol_fn_t sym_cb); +ssize_t load_elf_ram_sym(const char *filename, + uint64_t (*elf_note_fn)(void *, void *, bool), + uint64_t (*translate_fn)(void *, uint64_t), + void *translate_opaque, uint64_t *pentry, + uint64_t *lowaddr, uint64_t *highaddr, + uint32_t *pflags, int big_endian, int elf_machine, + int clear_lsb, int data_swab, + AddressSpace *as, bool load_rom, symbol_fn_t sym_cb); /** load_elf_ram: * Same as load_elf_ram_sym(), but doesn't allow the caller to specify a * symbol callback function */ -int load_elf_ram(const char *filename, - uint64_t (*elf_note_fn)(void *, void *, bool), - uint64_t (*translate_fn)(void *, uint64_t), - void *translate_opaque, uint64_t *pentry, uint64_t *lowaddr, - uint64_t *highaddr, uint32_t *pflags, int big_endian, - int elf_machine, int clear_lsb, int data_swab, - AddressSpace *as, bool load_rom); 
+ssize_t load_elf_ram(const char *filename, + uint64_t (*elf_note_fn)(void *, void *, bool), + uint64_t (*translate_fn)(void *, uint64_t), + void *translate_opaque, uint64_t *pentry, + uint64_t *lowaddr, uint64_t *highaddr, uint32_t *pflags, + int big_endian, int elf_machine, int clear_lsb, + int data_swab, AddressSpace *as, bool load_rom); /** load_elf_as: * Same as load_elf_ram(), but always loads the elf as ROM */ -int load_elf_as(const char *filename, - uint64_t (*elf_note_fn)(void *, void *, bool), - uint64_t (*translate_fn)(void *, uint64_t), - void *translate_opaque, uint64_t *pentry, uint64_t *lowaddr, - uint64_t *highaddr, uint32_t *pflags, int big_endian, - int elf_machine, int clear_lsb, int data_swab, - AddressSpace *as); +ssize_t load_elf_as(const char *filename, + uint64_t (*elf_note_fn)(void *, void *, bool), + uint64_t (*translate_fn)(void *, uint64_t), + void *translate_opaque, uint64_t *pentry, uint64_t *lowaddr, + uint64_t *highaddr, uint32_t *pflags, int big_endian, + int elf_machine, int clear_lsb, int data_swab, + AddressSpace *as); /** load_elf: * Same as load_elf_as(), but doesn't allow the caller to specify an * AddressSpace. */ -int load_elf(const char *filename, - uint64_t (*elf_note_fn)(void *, void *, bool), - uint64_t (*translate_fn)(void *, uint64_t), - void *translate_opaque, uint64_t *pentry, uint64_t *lowaddr, - uint64_t *highaddr, uint32_t *pflags, int big_endian, - int elf_machine, int clear_lsb, int data_swab); +ssize_t load_elf(const char *filename, + uint64_t (*elf_note_fn)(void *, void *, bool), + uint64_t (*translate_fn)(void *, uint64_t), + void *translate_opaque, uint64_t *pentry, uint64_t *lowaddr, + uint64_t *highaddr, uint32_t *pflags, int big_endian, + int elf_machine, int clear_lsb, int data_swab); /** load_elf_hdr: * @filename: Path of ELF file diff --git a/include/hw/misc/mac_via.h b/include/hw/misc/mac_via.h index 4506abe5d0..b445565866 100644 --- a/include/hw/misc/mac_via.h +++ b/include/hw/misc/mac_via.h @@ -43,6 +43,7 @@ struct MOS6522Q800VIA1State { MemoryRegion via_mem; qemu_irq irqs[VIA1_IRQ_NB]; + qemu_irq auxmode_irq; uint8_t last_b; /* RTC */ diff --git a/include/hw/misc/virt_ctrl.h b/include/hw/misc/virt_ctrl.h index edfadc4695..25a237e518 100644 --- a/include/hw/misc/virt_ctrl.h +++ b/include/hw/misc/virt_ctrl.h @@ -1,5 +1,5 @@ /* - * SPDX-License-Identifer: GPL-2.0-or-later + * SPDX-License-Identifier: GPL-2.0-or-later * * Virt system Controller */ diff --git a/include/hw/nubus/mac-nubus-bridge.h b/include/hw/nubus/mac-nubus-bridge.h index 36aa098dd4..70ab50ab2d 100644 --- a/include/hw/nubus/mac-nubus-bridge.h +++ b/include/hw/nubus/mac-nubus-bridge.h @@ -12,13 +12,18 @@ #include "hw/nubus/nubus.h" #include "qom/object.h" +#define MAC_NUBUS_FIRST_SLOT 0x9 +#define MAC_NUBUS_LAST_SLOT 0xe +#define MAC_NUBUS_SLOT_NB (MAC_NUBUS_LAST_SLOT - MAC_NUBUS_FIRST_SLOT + 1) + #define TYPE_MAC_NUBUS_BRIDGE "mac-nubus-bridge" -OBJECT_DECLARE_SIMPLE_TYPE(MacNubusState, MAC_NUBUS_BRIDGE) +OBJECT_DECLARE_SIMPLE_TYPE(MacNubusBridge, MAC_NUBUS_BRIDGE) -struct MacNubusState { - SysBusDevice sysbus_dev; +struct MacNubusBridge { + NubusBridge parent_obj; - NubusBus *bus; + MemoryRegion super_slot_alias; + MemoryRegion slot_alias; }; #endif diff --git a/include/hw/nubus/nubus.h b/include/hw/nubus/nubus.h index e2b5cf260b..b3b4d2eadb 100644 --- a/include/hw/nubus/nubus.h +++ b/include/hw/nubus/nubus.h @@ -10,17 +10,23 @@ #define HW_NUBUS_NUBUS_H #include "hw/qdev-properties.h" +#include "hw/sysbus.h" #include "exec/address-spaces.h" #include 
"qom/object.h" +#include "qemu/units.h" #define NUBUS_SUPER_SLOT_SIZE 0x10000000U -#define NUBUS_SUPER_SLOT_NB 0x9 +#define NUBUS_SUPER_SLOT_NB 0xe + +#define NUBUS_SLOT_BASE (NUBUS_SUPER_SLOT_SIZE * \ + (NUBUS_SUPER_SLOT_NB + 1)) #define NUBUS_SLOT_SIZE 0x01000000 -#define NUBUS_SLOT_NB 0xF +#define NUBUS_FIRST_SLOT 0x0 +#define NUBUS_LAST_SLOT 0xf +#define NUBUS_SLOT_NB (NUBUS_LAST_SLOT - NUBUS_FIRST_SLOT + 1) -#define NUBUS_FIRST_SLOT 0x9 -#define NUBUS_LAST_SLOT 0xF +#define NUBUS_IRQS 16 #define TYPE_NUBUS_DEVICE "nubus-device" OBJECT_DECLARE_SIMPLE_TYPE(NubusDevice, NUBUS_DEVICE) @@ -29,40 +35,41 @@ OBJECT_DECLARE_SIMPLE_TYPE(NubusDevice, NUBUS_DEVICE) OBJECT_DECLARE_SIMPLE_TYPE(NubusBus, NUBUS_BUS) #define TYPE_NUBUS_BRIDGE "nubus-bridge" +OBJECT_DECLARE_SIMPLE_TYPE(NubusBridge, NUBUS_BRIDGE); struct NubusBus { BusState qbus; + AddressSpace nubus_as; + MemoryRegion nubus_mr; + MemoryRegion super_slot_io; MemoryRegion slot_io; - int current_slot; + uint16_t slot_available_mask; + + qemu_irq irqs[NUBUS_IRQS]; }; +#define NUBUS_DECL_ROM_MAX_SIZE (128 * KiB) + struct NubusDevice { DeviceState qdev; - int slot_nb; + int32_t slot; + MemoryRegion super_slot_mem; MemoryRegion slot_mem; - /* Format Block */ - - MemoryRegion fblock_io; - - uint32_t rom_length; - uint32_t rom_crc; - uint8_t rom_rev; - uint8_t rom_format; - uint8_t byte_lanes; - int32_t directory_offset; - - /* ROM */ - - MemoryRegion rom_io; - const uint8_t *rom; + char *romfile; + MemoryRegion decl_rom; }; -void nubus_register_rom(NubusDevice *dev, const uint8_t *rom, uint32_t size, - int revision, int format, uint8_t byte_lanes); +void nubus_set_irq(NubusDevice *nd, int level); + +struct NubusBridge { + SysBusDevice parent_obj; + + NubusBus bus; +}; #endif diff --git a/include/hw/nvram/xlnx-bbram.h b/include/hw/nvram/xlnx-bbram.h new file mode 100644 index 0000000000..87d59ef3c0 --- /dev/null +++ b/include/hw/nvram/xlnx-bbram.h @@ -0,0 +1,54 @@ +/* + * QEMU model of the Xilinx BBRAM Battery Backed RAM + * + * Copyright (c) 2015-2021 Xilinx Inc. + * + * Written by Edgar E. Iglesias + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ +#ifndef XLNX_BBRAM_H +#define XLNX_BBRAM_H + +#include "sysemu/block-backend.h" +#include "hw/qdev-core.h" +#include "hw/irq.h" +#include "hw/sysbus.h" +#include "hw/register.h" + +#define RMAX_XLNX_BBRAM ((0x4c / 4) + 1) + +#define TYPE_XLNX_BBRAM "xlnx,bbram-ctrl" +OBJECT_DECLARE_SIMPLE_TYPE(XlnxBBRam, XLNX_BBRAM); + +struct XlnxBBRam { + SysBusDevice parent_obj; + qemu_irq irq_bbram; + + BlockBackend *blk; + + uint32_t crc_zpads; + bool bbram8_wo; + bool blk_ro; + + uint32_t regs[RMAX_XLNX_BBRAM]; + RegisterInfo regs_info[RMAX_XLNX_BBRAM]; +}; + +#endif diff --git a/include/hw/nvram/xlnx-efuse.h b/include/hw/nvram/xlnx-efuse.h new file mode 100644 index 0000000000..58414e468b --- /dev/null +++ b/include/hw/nvram/xlnx-efuse.h @@ -0,0 +1,132 @@ +/* + * QEMU model of the Xilinx eFuse core + * + * Copyright (c) 2015 Xilinx Inc. + * + * Written by Edgar E. Iglesias + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#ifndef XLNX_EFUSE_H +#define XLNX_EFUSE_H + +#include "sysemu/block-backend.h" +#include "hw/qdev-core.h" + +#define TYPE_XLNX_EFUSE "xlnx,efuse" +OBJECT_DECLARE_SIMPLE_TYPE(XlnxEFuse, XLNX_EFUSE); + +struct XlnxEFuse { + DeviceState parent_obj; + BlockBackend *blk; + bool blk_ro; + uint32_t *fuse32; + + DeviceState *dev; + + bool init_tbits; + + uint8_t efuse_nr; + uint32_t efuse_size; + + uint32_t *ro_bits; + uint32_t ro_bits_cnt; +}; + +/** + * xlnx_efuse_calc_crc: + * @data: an array of 32-bit words for which the CRC should be computed + * @u32_cnt: the array size in number of 32-bit words + * @zpads: the number of 32-bit zeros prepended to @data before computation + * + * This function is used to compute the CRC for an array of 32-bit words, + * using a Xilinx-specific data padding. 
+ * + * Returns: the computed 32-bit CRC + */ +uint32_t xlnx_efuse_calc_crc(const uint32_t *data, unsigned u32_cnt, + unsigned zpads); + +/** + * xlnx_efuse_get_bit: + * @s: the efuse object + * @bit: the efuse bit-address to read the data + * + * Returns: the bit, 0 or 1, at @bit of object @s + */ +bool xlnx_efuse_get_bit(XlnxEFuse *s, unsigned int bit); + +/** + * xlnx_efuse_set_bit: + * @s: the efuse object + * @bit: the efuse bit-address to be written a value of 1 + * + * Returns: true on success, false on failure + */ +bool xlnx_efuse_set_bit(XlnxEFuse *s, unsigned int bit); + +/** + * xlnx_efuse_k256_check: + * @s: the efuse object + * @crc: the 32-bit CRC to be compared with + * @start: the efuse bit-address (which must be multiple of 32) of the + * start of a 256-bit array + * + * This function computes the CRC of a 256-bit array starting at @start + * then compares to the given @crc + * + * Returns: true of @crc == computed, false otherwise + */ +bool xlnx_efuse_k256_check(XlnxEFuse *s, uint32_t crc, unsigned start); + +/** + * xlnx_efuse_tbits_check: + * @s: the efuse object + * + * This function inspects a number of efuse bits at specific addresses + * to see if they match a validation pattern. Each pattern is a group + * of 4 bits, and there are 3 groups. + * + * Returns: a 3-bit mask, where a bit of '1' means the corresponding + * group has a valid pattern. + */ +uint32_t xlnx_efuse_tbits_check(XlnxEFuse *s); + +/** + * xlnx_efuse_get_row: + * @s: the efuse object + * @bit: the efuse bit address for which a 32-bit value is read + * + * Returns: the entire 32 bits of the efuse, starting at a bit + * address that is multiple of 32 and contains the bit at @bit + */ +static inline uint32_t xlnx_efuse_get_row(XlnxEFuse *s, unsigned int bit) +{ + if (!(s->fuse32)) { + return 0; + } else { + unsigned int row_idx = bit / 32; + + assert(row_idx < (s->efuse_size * s->efuse_nr / 32)); + return s->fuse32[row_idx]; + } +} + +#endif diff --git a/include/hw/nvram/xlnx-versal-efuse.h b/include/hw/nvram/xlnx-versal-efuse.h new file mode 100644 index 0000000000..a873dc5cb0 --- /dev/null +++ b/include/hw/nvram/xlnx-versal-efuse.h @@ -0,0 +1,68 @@ +/* + * Copyright (c) 2020 Xilinx Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ +#ifndef XLNX_VERSAL_EFUSE_H +#define XLNX_VERSAL_EFUSE_H + +#include "hw/irq.h" +#include "hw/sysbus.h" +#include "hw/register.h" +#include "hw/nvram/xlnx-efuse.h" + +#define XLNX_VERSAL_EFUSE_CTRL_R_MAX ((0x100 / 4) + 1) + +#define TYPE_XLNX_VERSAL_EFUSE_CTRL "xlnx,versal-efuse" +#define TYPE_XLNX_VERSAL_EFUSE_CACHE "xlnx,pmc-efuse-cache" + +OBJECT_DECLARE_SIMPLE_TYPE(XlnxVersalEFuseCtrl, XLNX_VERSAL_EFUSE_CTRL); +OBJECT_DECLARE_SIMPLE_TYPE(XlnxVersalEFuseCache, XLNX_VERSAL_EFUSE_CACHE); + +struct XlnxVersalEFuseCtrl { + SysBusDevice parent_obj; + qemu_irq irq_efuse_imr; + + XlnxEFuse *efuse; + + void *extra_pg0_lock_spec; /* Opaque property */ + uint32_t extra_pg0_lock_n16; + + uint32_t regs[XLNX_VERSAL_EFUSE_CTRL_R_MAX]; + RegisterInfo regs_info[XLNX_VERSAL_EFUSE_CTRL_R_MAX]; +}; + +struct XlnxVersalEFuseCache { + SysBusDevice parent_obj; + MemoryRegion iomem; + + XlnxEFuse *efuse; +}; + +/** + * xlnx_versal_efuse_read_row: + * @s: the efuse object + * @bit: the bit-address within the 32-bit row to be read + * @denied: if non-NULL, to receive true if the row is write-only + * + * Returns: the 32-bit word containing address @bit; 0 if @denies is true + */ +uint32_t xlnx_versal_efuse_read_row(XlnxEFuse *s, uint32_t bit, bool *denied); + +#endif diff --git a/include/hw/nvram/xlnx-zynqmp-efuse.h b/include/hw/nvram/xlnx-zynqmp-efuse.h new file mode 100644 index 0000000000..6b051ec4f1 --- /dev/null +++ b/include/hw/nvram/xlnx-zynqmp-efuse.h @@ -0,0 +1,44 @@ +/* + * Copyright (c) 2021 Xilinx Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ +#ifndef XLNX_ZYNQMP_EFUSE_H +#define XLNX_ZYNQMP_EFUSE_H + +#include "hw/irq.h" +#include "hw/sysbus.h" +#include "hw/register.h" +#include "hw/nvram/xlnx-efuse.h" + +#define XLNX_ZYNQMP_EFUSE_R_MAX ((0x10fc / 4) + 1) + +#define TYPE_XLNX_ZYNQMP_EFUSE "xlnx,zynqmp-efuse" +OBJECT_DECLARE_SIMPLE_TYPE(XlnxZynqMPEFuse, XLNX_ZYNQMP_EFUSE); + +struct XlnxZynqMPEFuse { + SysBusDevice parent_obj; + qemu_irq irq; + + XlnxEFuse *efuse; + uint32_t regs[XLNX_ZYNQMP_EFUSE_R_MAX]; + RegisterInfo regs_info[XLNX_ZYNQMP_EFUSE_R_MAX]; +}; + +#endif diff --git a/include/hw/pci/pci.h b/include/hw/pci/pci.h index d0f4266e37..5c4016b995 100644 --- a/include/hw/pci/pci.h +++ b/include/hw/pci/pci.h @@ -401,13 +401,17 @@ typedef PCIINTxRoute (*pci_route_irq_fn)(void *opaque, int pin); OBJECT_DECLARE_TYPE(PCIBus, PCIBusClass, PCI_BUS) #define TYPE_PCIE_BUS "PCIE" +typedef void (*pci_bus_dev_fn)(PCIBus *b, PCIDevice *d, void *opaque); +typedef void (*pci_bus_fn)(PCIBus *b, void *opaque); +typedef void *(*pci_bus_ret_fn)(PCIBus *b, void *opaque); + bool pci_bus_is_express(PCIBus *bus); -void pci_root_bus_new_inplace(PCIBus *bus, size_t bus_size, DeviceState *parent, - const char *name, - MemoryRegion *address_space_mem, - MemoryRegion *address_space_io, - uint8_t devfn_min, const char *typename); +void pci_root_bus_init(PCIBus *bus, size_t bus_size, DeviceState *parent, + const char *name, + MemoryRegion *address_space_mem, + MemoryRegion *address_space_io, + uint8_t devfn_min, const char *typename); PCIBus *pci_root_bus_new(DeviceState *parent, const char *name, MemoryRegion *address_space_mem, MemoryRegion *address_space_io, @@ -458,23 +462,23 @@ static inline int pci_dev_bus_num(const PCIDevice *dev) int pci_bus_numa_node(PCIBus *bus); void pci_for_each_device(PCIBus *bus, int bus_num, - void (*fn)(PCIBus *bus, PCIDevice *d, void *opaque), + pci_bus_dev_fn fn, void *opaque); void pci_for_each_device_reverse(PCIBus *bus, int bus_num, - void (*fn)(PCIBus *bus, PCIDevice *d, - void *opaque), + pci_bus_dev_fn fn, void *opaque); -void pci_for_each_bus_depth_first(PCIBus *bus, - void *(*begin)(PCIBus *bus, void *parent_state), - void (*end)(PCIBus *bus, void *state), - void *parent_state); +void pci_for_each_device_under_bus(PCIBus *bus, + pci_bus_dev_fn fn, void *opaque); +void pci_for_each_device_under_bus_reverse(PCIBus *bus, + pci_bus_dev_fn fn, + void *opaque); +void pci_for_each_bus_depth_first(PCIBus *bus, pci_bus_ret_fn begin, + pci_bus_fn end, void *parent_state); PCIDevice *pci_get_function_0(PCIDevice *pci_dev); /* Use this wrapper when specific scan order is not required. 
*/ static inline -void pci_for_each_bus(PCIBus *bus, - void (*fn)(PCIBus *bus, void *opaque), - void *opaque) +void pci_for_each_bus(PCIBus *bus, pci_bus_fn fn, void *opaque) { pci_for_each_bus_depth_first(bus, NULL, fn, opaque); } diff --git a/include/hw/ppc/openpic.h b/include/hw/ppc/openpic.h index 74ff44bff0..ebdaf8a493 100644 --- a/include/hw/ppc/openpic.h +++ b/include/hw/ppc/openpic.h @@ -21,7 +21,6 @@ enum { typedef struct IrqLines { qemu_irq irq[OPENPIC_OUTPUT_NB]; } IrqLines; -#define OPENPIC_MODEL_RAVEN 0 #define OPENPIC_MODEL_FSL_MPIC_20 1 #define OPENPIC_MODEL_FSL_MPIC_42 2 #define OPENPIC_MODEL_KEYLARGO 3 @@ -32,13 +31,6 @@ typedef struct IrqLines { qemu_irq irq[OPENPIC_OUTPUT_NB]; } IrqLines; #define OPENPIC_MAX_IRQ (OPENPIC_MAX_SRC + OPENPIC_MAX_IPI + \ OPENPIC_MAX_TMR) -/* Raven */ -#define RAVEN_MAX_CPU 2 -#define RAVEN_MAX_EXT 48 -#define RAVEN_MAX_IRQ 64 -#define RAVEN_MAX_TMR OPENPIC_MAX_TMR -#define RAVEN_MAX_IPI OPENPIC_MAX_IPI - /* KeyLargo */ #define KEYLARGO_MAX_CPU 4 #define KEYLARGO_MAX_EXT 64 @@ -49,14 +41,6 @@ typedef struct IrqLines { qemu_irq irq[OPENPIC_OUTPUT_NB]; } IrqLines; /* Timers don't exist but this makes the code happy... */ #define KEYLARGO_TMR_IRQ (KEYLARGO_IPI_IRQ + KEYLARGO_MAX_IPI) -/* Interrupt definitions */ -#define RAVEN_FE_IRQ (RAVEN_MAX_EXT) /* Internal functional IRQ */ -#define RAVEN_ERR_IRQ (RAVEN_MAX_EXT + 1) /* Error IRQ */ -#define RAVEN_TMR_IRQ (RAVEN_MAX_EXT + 2) /* First timer IRQ */ -#define RAVEN_IPI_IRQ (RAVEN_TMR_IRQ + RAVEN_MAX_TMR) /* First IPI IRQ */ -/* First doorbell IRQ */ -#define RAVEN_DBL_IRQ (RAVEN_IPI_IRQ + (RAVEN_MAX_CPU * RAVEN_MAX_IPI)) - typedef struct FslMpicInfo { int max_ext; } FslMpicInfo; @@ -67,7 +51,8 @@ typedef enum IRQType { IRQ_TYPE_FSLSPECIAL, /* FSL timer/IPI interrupt, edge, no polarity */ } IRQType; -/* Round up to the nearest 64 IRQs so that the queue length +/* + * Round up to the nearest 64 IRQs so that the queue length * won't change when moving between 32 and 64 bit hosts. */ #define IRQQUEUE_SIZE_BITS ((OPENPIC_MAX_IRQ + 63) & ~63) @@ -117,8 +102,10 @@ typedef struct OpenPICTimer { bool qemu_timer_active; /* Is the qemu_timer is running? */ struct QEMUTimer *qemu_timer; struct OpenPICState *opp; /* Device timer is part of. */ - /* The QEMU_CLOCK_VIRTUAL time (in ns) corresponding to the last - current_count written or read, only defined if qemu_timer_active. */ + /* + * The QEMU_CLOCK_VIRTUAL time (in ns) corresponding to the last + * current_count written or read, only defined if qemu_timer_active. + */ uint64_t origin_time; } OpenPICTimer; diff --git a/include/hw/ppc/pnv_core.h b/include/hw/ppc/pnv_core.h index 6ecee98a76..c22eab2e1f 100644 --- a/include/hw/ppc/pnv_core.h +++ b/include/hw/ppc/pnv_core.h @@ -67,7 +67,7 @@ OBJECT_DECLARE_SIMPLE_TYPE(PnvQuad, PNV_QUAD) struct PnvQuad { DeviceState parent_obj; - uint32_t id; + uint32_t quad_id; MemoryRegion xscom_regs; }; #endif /* PPC_PNV_CORE_H */ diff --git a/include/hw/ppc/spapr.h b/include/hw/ppc/spapr.h index 637652ad16..ee7504b976 100644 --- a/include/hw/ppc/spapr.h +++ b/include/hw/ppc/spapr.h @@ -100,23 +100,30 @@ typedef enum { #define FDT_MAX_SIZE 0x200000 -/* - * NUMA related macros. MAX_DISTANCE_REF_POINTS was taken - * from Linux kernel arch/powerpc/mm/numa.h. It represents the - * amount of associativity domains for non-CPU resources. - * - * NUMA_ASSOC_SIZE is the base array size of an ibm,associativity - * array for any non-CPU resource. 
- * - * VCPU_ASSOC_SIZE represents the size of ibm,associativity array - * for CPUs, which has an extra element (vcpu_id) in the end. - */ -#define MAX_DISTANCE_REF_POINTS 4 -#define NUMA_ASSOC_SIZE (MAX_DISTANCE_REF_POINTS + 1) -#define VCPU_ASSOC_SIZE (NUMA_ASSOC_SIZE + 1) +/* Max number of GPUs per system */ +#define NVGPU_MAX_NUM 6 -/* Max number of these GPUsper a physical box */ -#define NVGPU_MAX_NUM 6 +/* Max number of NUMA nodes */ +#define NUMA_NODES_MAX_NUM (MAX_NODES + NVGPU_MAX_NUM) + +/* + * NUMA FORM1 macros. FORM1_DIST_REF_POINTS was taken from + * MAX_DISTANCE_REF_POINTS in arch/powerpc/mm/numa.h from Linux + * kernel source. It represents the amount of associativity domains + * for non-CPU resources. + * + * FORM1_NUMA_ASSOC_SIZE is the base array size of an ibm,associativity + * array for any non-CPU resource. + */ +#define FORM1_DIST_REF_POINTS 4 +#define FORM1_NUMA_ASSOC_SIZE (FORM1_DIST_REF_POINTS + 1) + +/* + * FORM2 NUMA affinity has a single associativity domain, giving + * us a assoc size of 2. + */ +#define FORM2_DIST_REF_POINTS 1 +#define FORM2_NUMA_ASSOC_SIZE (FORM2_DIST_REF_POINTS + 1) typedef struct SpaprCapabilities SpaprCapabilities; struct SpaprCapabilities { @@ -145,6 +152,7 @@ struct SpaprMachineClass { hwaddr rma_limit; /* clamp the RMA to this size */ bool pre_5_1_assoc_refpoints; bool pre_5_2_numa_associativity; + bool pre_6_2_numa_affinity; bool (*phb_placement)(SpaprMachineState *spapr, uint32_t index, uint64_t *buid, hwaddr *pio, @@ -249,7 +257,8 @@ struct SpaprMachineState { unsigned gpu_numa_id; SpaprTpmProxy *tpm_proxy; - uint32_t numa_assoc_array[MAX_NODES + NVGPU_MAX_NUM][NUMA_ASSOC_SIZE]; + uint32_t FORM1_assoc_array[NUMA_NODES_MAX_NUM][FORM1_NUMA_ASSOC_SIZE]; + uint32_t FORM2_assoc_array[NUMA_NODES_MAX_NUM][FORM2_NUMA_ASSOC_SIZE]; Error *fwnmi_migration_blocker; }; diff --git a/include/hw/ppc/spapr_numa.h b/include/hw/ppc/spapr_numa.h index 6f9f02d3de..7cb3367400 100644 --- a/include/hw/ppc/spapr_numa.h +++ b/include/hw/ppc/spapr_numa.h @@ -24,6 +24,7 @@ */ void spapr_numa_associativity_init(SpaprMachineState *spapr, MachineState *machine); +void spapr_numa_associativity_check(SpaprMachineState *spapr); void spapr_numa_write_rtas_dt(SpaprMachineState *spapr, void *fdt, int rtas); void spapr_numa_write_associativity_dt(SpaprMachineState *spapr, void *fdt, int offset, int nodeid); diff --git a/include/hw/ppc/spapr_ovec.h b/include/hw/ppc/spapr_ovec.h index 48b716a060..c3e8b98e7e 100644 --- a/include/hw/ppc/spapr_ovec.h +++ b/include/hw/ppc/spapr_ovec.h @@ -49,6 +49,7 @@ typedef struct SpaprOptionVector SpaprOptionVector; /* option vector 5 */ #define OV5_DRCONF_MEMORY OV_BIT(2, 2) #define OV5_FORM1_AFFINITY OV_BIT(5, 0) +#define OV5_FORM2_AFFINITY OV_BIT(5, 2) #define OV5_HP_EVT OV_BIT(6, 5) #define OV5_HPT_RESIZE OV_BIT(6, 7) #define OV5_DRMEM_V2 OV_BIT(22, 0) diff --git a/include/hw/ppc/xive.h b/include/hw/ppc/xive.h index db76411654..b8ab0bf749 100644 --- a/include/hw/ppc/xive.h +++ b/include/hw/ppc/xive.h @@ -286,6 +286,30 @@ uint8_t xive_esb_set(uint8_t *pq, uint8_t value); uint8_t xive_source_esb_get(XiveSource *xsrc, uint32_t srcno); uint8_t xive_source_esb_set(XiveSource *xsrc, uint32_t srcno, uint8_t pq); +/* + * Source status helpers + */ +static inline void xive_source_set_status(XiveSource *xsrc, uint32_t srcno, + uint8_t status, bool enable) +{ + if (enable) { + xsrc->status[srcno] |= status; + } else { + xsrc->status[srcno] &= ~status; + } +} + +static inline void xive_source_set_asserted(XiveSource *xsrc, uint32_t srcno, + bool 
enable) +{ + xive_source_set_status(xsrc, srcno, XIVE_STATUS_ASSERTED, enable); +} + +static inline bool xive_source_is_asserted(XiveSource *xsrc, uint32_t srcno) +{ + return xsrc->status[srcno] & XIVE_STATUS_ASSERTED; +} + void xive_source_pic_print_info(XiveSource *xsrc, uint32_t offset, Monitor *mon); @@ -335,6 +359,11 @@ struct XiveTCTX { XivePresenter *xptr; }; +static inline uint32_t xive_tctx_word2(uint8_t *ring) +{ + return *((uint32_t *) &ring[TM_WORD2]); +} + /* * XIVE Router */ @@ -458,6 +487,17 @@ struct XiveENDSource { */ #define XIVE_PRIORITY_MAX 7 +/* + * Convert a priority number to an Interrupt Pending Buffer (IPB) + * register, which indicates a pending interrupt at the priority + * corresponding to the bit number + */ +static inline uint8_t xive_priority_to_ipb(uint8_t priority) +{ + return priority > XIVE_PRIORITY_MAX ? + 0 : 1 << (XIVE_PRIORITY_MAX - priority); +} + /* * XIVE Thread Interrupt Management Aera (TIMA) * diff --git a/include/hw/qdev-core.h b/include/hw/qdev-core.h index 34c8a7506a..72622bd337 100644 --- a/include/hw/qdev-core.h +++ b/include/hw/qdev-core.h @@ -26,6 +26,7 @@ typedef enum DeviceCategory { DEVICE_CATEGORY_SOUND, DEVICE_CATEGORY_MISC, DEVICE_CATEGORY_CPU, + DEVICE_CATEGORY_WATCHDOG, DEVICE_CATEGORY_MAX } DeviceCategory; @@ -176,11 +177,11 @@ struct DeviceState { Object parent_obj; /*< public >*/ - const char *id; + char *id; char *canonical_path; bool realized; bool pending_deleted_event; - QemuOpts *opts; + QDict *opts; int hotplugged; bool allow_unplug_during_migration; BusState *parent_bus; @@ -201,8 +202,12 @@ struct DeviceListener { * informs qdev if a device should be visible or hidden. We can * hide a failover device depending for example on the device * opts. + * + * On errors, it returns false and errp is set. Device creation + * should fail in this case. */ - bool (*hide_device)(DeviceListener *listener, QemuOpts *device_opts); + bool (*hide_device)(DeviceListener *listener, const QDict *device_opts, + bool from_json, Error **errp); QTAILQ_ENTRY(DeviceListener) link; }; @@ -678,9 +683,9 @@ DeviceState *qdev_find_recursive(BusState *bus, const char *id); typedef int (qbus_walkerfn)(BusState *bus, void *opaque); typedef int (qdev_walkerfn)(DeviceState *dev, void *opaque); -void qbus_create_inplace(void *bus, size_t size, const char *typename, - DeviceState *parent, const char *name); -BusState *qbus_create(const char *typename, DeviceState *parent, const char *name); +void qbus_init(void *bus, size_t size, const char *typename, + DeviceState *parent, const char *name); +BusState *qbus_new(const char *typename, DeviceState *parent, const char *name); bool qbus_realize(BusState *bus, Error **errp); void qbus_unrealize(BusState *bus); @@ -831,13 +836,15 @@ void device_listener_unregister(DeviceListener *listener); /** * @qdev_should_hide_device: - * @opts: QemuOpts as passed on cmdline. + * @opts: options QDict + * @from_json: true if @opts entries are typed, false for all strings + * @errp: pointer to error object * * Check if a device should be added. * When a device is added via qdev_device_add() this will be called, * and return if the device should be added now or not. */ -bool qdev_should_hide_device(QemuOpts *opts); +bool qdev_should_hide_device(const QDict *opts, bool from_json, Error **errp); typedef enum MachineInitPhase { /* current_machine is NULL. 
*/ diff --git a/include/hw/rdma/rdma.h b/include/hw/rdma/rdma.h index e77e43a170..80b2e531c4 100644 --- a/include/hw/rdma/rdma.h +++ b/include/hw/rdma/rdma.h @@ -31,7 +31,7 @@ typedef struct RdmaProvider RdmaProvider; struct RdmaProviderClass { InterfaceClass parent; - void (*print_statistics)(Monitor *mon, RdmaProvider *obj); + void (*format_statistics)(RdmaProvider *obj, GString *buf); }; #endif diff --git a/include/hw/riscv/boot.h b/include/hw/riscv/boot.h index 0e89400b09..baff11dd8a 100644 --- a/include/hw/riscv/boot.h +++ b/include/hw/riscv/boot.h @@ -31,6 +31,8 @@ bool riscv_is_32bit(RISCVHartArrayState *harts); +char *riscv_plic_hart_config_string(int hart_count); + target_ulong riscv_calc_kernel_start_addr(RISCVHartArrayState *harts, target_ulong firmware_end_addr); target_ulong riscv_find_and_load_firmware(MachineState *machine, diff --git a/include/hw/riscv/microchip_pfsoc.h b/include/hw/riscv/microchip_pfsoc.h index d30916f45d..a0673f5f59 100644 --- a/include/hw/riscv/microchip_pfsoc.h +++ b/include/hw/riscv/microchip_pfsoc.h @@ -138,7 +138,6 @@ enum { #define MICROCHIP_PFSOC_MANAGEMENT_CPU_COUNT 1 #define MICROCHIP_PFSOC_COMPUTE_CPU_COUNT 4 -#define MICROCHIP_PFSOC_PLIC_HART_CONFIG "MS" #define MICROCHIP_PFSOC_PLIC_NUM_SOURCES 185 #define MICROCHIP_PFSOC_PLIC_NUM_PRIORITIES 7 #define MICROCHIP_PFSOC_PLIC_PRIORITY_BASE 0x04 diff --git a/include/hw/riscv/opentitan.h b/include/hw/riscv/opentitan.h index 9f93bebdac..eac35ef590 100644 --- a/include/hw/riscv/opentitan.h +++ b/include/hw/riscv/opentitan.h @@ -20,7 +20,7 @@ #define HW_OPENTITAN_H #include "hw/riscv/riscv_hart.h" -#include "hw/intc/ibex_plic.h" +#include "hw/intc/sifive_plic.h" #include "hw/char/ibex_uart.h" #include "hw/timer/ibex_timer.h" #include "qom/object.h" @@ -34,7 +34,7 @@ struct LowRISCIbexSoCState { /*< public >*/ RISCVHartArrayState cpus; - IbexPlicState plic; + SiFivePLICState plic; IbexUartState uart; IbexTimerState timer; @@ -87,7 +87,7 @@ enum { }; enum { - IBEX_TIMER_TIMEREXPIRED0_0 = 125, + IBEX_TIMER_TIMEREXPIRED0_0 = 126, IBEX_UART0_RX_PARITY_ERR_IRQ = 8, IBEX_UART0_RX_TIMEOUT_IRQ = 7, IBEX_UART0_RX_BREAK_ERR_IRQ = 6, diff --git a/include/hw/riscv/sifive_u.h b/include/hw/riscv/sifive_u.h index f71c90c94c..8f63a183c4 100644 --- a/include/hw/riscv/sifive_u.h +++ b/include/hw/riscv/sifive_u.h @@ -156,7 +156,6 @@ enum { #define SIFIVE_U_MANAGEMENT_CPU_COUNT 1 #define SIFIVE_U_COMPUTE_CPU_COUNT 4 -#define SIFIVE_U_PLIC_HART_CONFIG "MS" #define SIFIVE_U_PLIC_NUM_SOURCES 54 #define SIFIVE_U_PLIC_NUM_PRIORITIES 7 #define SIFIVE_U_PLIC_PRIORITY_BASE 0x04 diff --git a/include/hw/riscv/virt.h b/include/hw/riscv/virt.h index d9105c1886..b8ef99f348 100644 --- a/include/hw/riscv/virt.h +++ b/include/hw/riscv/virt.h @@ -73,7 +73,6 @@ enum { VIRTIO_NDEV = 0x35 /* Arbitrary maximum number of interrupts */ }; -#define VIRT_PLIC_HART_CONFIG "MS" #define VIRT_PLIC_NUM_SOURCES 127 #define VIRT_PLIC_NUM_PRIORITIES 7 #define VIRT_PLIC_PRIORITY_BASE 0x04 diff --git a/include/hw/scsi/scsi.h b/include/hw/scsi/scsi.h index 0b726bc78c..a567a5ed86 100644 --- a/include/hw/scsi/scsi.h +++ b/include/hw/scsi/scsi.h @@ -146,8 +146,34 @@ struct SCSIBus { const SCSIBusInfo *info; }; -void scsi_bus_new(SCSIBus *bus, size_t bus_size, DeviceState *host, - const SCSIBusInfo *info, const char *bus_name); +/** + * scsi_bus_init_named: Initialize a SCSI bus with the specified name + * @bus: SCSIBus object to initialize + * @bus_size: size of @bus object + * @host: Device which owns the bus (generally the SCSI controller) + * @info: structure 
defining callbacks etc for the controller
+ * @bus_name: Name to use for this bus
+ *
+ * This in-place initializes @bus as a new SCSI bus with a name
+ * provided by the caller. It is the caller's responsibility to make
+ * sure that name does not clash with the name of any other bus in the
+ * system. Unless you need the new bus to have a specific name, you
+ * should use scsi_bus_init() instead.
+ */
+void scsi_bus_init_named(SCSIBus *bus, size_t bus_size, DeviceState *host,
+                         const SCSIBusInfo *info, const char *bus_name);
+
+/**
+ * scsi_bus_init: Initialize a SCSI bus
+ *
+ * This in-place-initializes @bus as a new SCSI bus and gives it
+ * an automatically generated unique name.
+ */
+static inline void scsi_bus_init(SCSIBus *bus, size_t bus_size,
+                                 DeviceState *host, const SCSIBusInfo *info)
+{
+    scsi_bus_init_named(bus, bus_size, host, info, NULL);
+}
 
 static inline SCSIBus *scsi_bus_from_device(SCSIDevice *d)
 {
diff --git a/include/hw/sd/npcm7xx_sdhci.h b/include/hw/sd/npcm7xx_sdhci.h
new file mode 100644
index 0000000000..d728f0a40d
--- /dev/null
+++ b/include/hw/sd/npcm7xx_sdhci.h
@@ -0,0 +1,65 @@
+/*
+ * NPCM7xx SD-3.0 / eMMC-4.51 Host Controller
+ *
+ * Copyright (c) 2021 Google LLC
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+
+#ifndef NPCM7XX_SDHCI_H
+#define NPCM7XX_SDHCI_H
+
+#include "hw/sd/sdhci.h"
+#include "qom/object.h"
+
+#define TYPE_NPCM7XX_SDHCI "npcm7xx.sdhci"
+#define NPCM7XX_PRSTVALS_SIZE 6
+#define NPCM7XX_PRSTVALS 0x60
+#define NPCM7XX_PRSTVALS_0 0x0
+#define NPCM7XX_PRSTVALS_1 0x2
+#define NPCM7XX_PRSTVALS_2 0x4
+#define NPCM7XX_PRSTVALS_3 0x6
+#define NPCM7XX_PRSTVALS_4 0x8
+#define NPCM7XX_PRSTVALS_5 0xA
+#define NPCM7XX_BOOTTOCTRL 0x10
+#define NPCM7XX_SDHCI_REGSIZE 0x20
+
+#define NPCM7XX_PRSNTS_RESET 0x04A00000
+#define NPCM7XX_BLKGAP_RESET 0x80
+#define NPCM7XX_CAPAB_RESET 0x0100200161EE0399
+#define NPCM7XX_MAXCURR_RESET 0x0000000000000005
+#define NPCM7XX_HCVER_RESET 0x1002
+
+#define NPCM7XX_PRSTVALS_0_RESET 0x0040
+#define NPCM7XX_PRSTVALS_1_RESET 0x0001
+#define NPCM7XX_PRSTVALS_3_RESET 0x0001
+
+OBJECT_DECLARE_SIMPLE_TYPE(NPCM7xxSDHCIState, NPCM7XX_SDHCI)
+
+typedef struct NPCM7xxRegs {
+    /* Preset Values Register Field, read-only */
+    uint16_t prstvals[NPCM7XX_PRSTVALS_SIZE];
+    /* Boot Timeout Control Register, read-write */
+    uint32_t boottoctrl;
+} NPCM7xxRegisters;
+
+typedef struct NPCM7xxSDHCIState {
+    SysBusDevice parent;
+
+    MemoryRegion container;
+    MemoryRegion iomem;
+    BusState *bus;
+    NPCM7xxRegisters regs;
+
+    SDHCIState sdhci;
+} NPCM7xxSDHCIState;
+
+#endif /* NPCM7XX_SDHCI_H */
diff --git a/include/hw/sh4/sh.h b/include/hw/sh4/sh.h
index becb596979..ec716cdd45 100644
--- a/include/hw/sh4/sh.h
+++ b/include/hw/sh4/sh.h
@@ -44,25 +44,18 @@ typedef struct {
     uint16_t portbmask_trigger;
     /* Return 0 if no action was taken */
     int (*port_change_cb) (uint16_t porta, uint16_t portb,
-                           uint16_t * periph_pdtra,
-                           uint16_t * periph_portdira,
-                           uint16_t * periph_pdtrb,
-                           uint16_t * periph_portdirb);
+                           uint16_t *periph_pdtra,
+                           uint16_t *periph_portdira,
+                           uint16_t *periph_pdtrb,
+
uint16_t *periph_portdirb); } sh7750_io_device; int sh7750_register_io_device(struct SH7750State *s, - sh7750_io_device * device); + sh7750_io_device *device); /* sh_serial.c */ +#define TYPE_SH_SERIAL "sh-serial" #define SH_SERIAL_FEAT_SCIF (1 << 0) -void sh_serial_init(MemoryRegion *sysmem, - hwaddr base, int feat, - uint32_t freq, Chardev *chr, - qemu_irq eri_source, - qemu_irq rxi_source, - qemu_irq txi_source, - qemu_irq tei_source, - qemu_irq bri_source); /* sh7750.c */ qemu_irq sh7750_irl(struct SH7750State *s); diff --git a/include/hw/sh4/sh_intc.h b/include/hw/sh4/sh_intc.h index 65f3425057..f62d5c5e13 100644 --- a/include/hw/sh4/sh_intc.h +++ b/include/hw/sh4/sh_intc.h @@ -58,7 +58,7 @@ struct intc_desc { }; int sh_intc_get_pending_vector(struct intc_desc *desc, int imask); -struct intc_source *sh_intc_source(struct intc_desc *desc, intc_enum id); + void sh_intc_toggle_source(struct intc_source *source, int enable_adj, int assert_adj); diff --git a/include/hw/ssi/aspeed_smc.h b/include/hw/ssi/aspeed_smc.h index cdaf165300..e265555819 100644 --- a/include/hw/ssi/aspeed_smc.h +++ b/include/hw/ssi/aspeed_smc.h @@ -29,66 +29,33 @@ #include "hw/sysbus.h" #include "qom/object.h" -typedef struct AspeedSegments { - hwaddr addr; - uint32_t size; -} AspeedSegments; - struct AspeedSMCState; -typedef struct AspeedSMCController { - const char *name; - uint8_t r_conf; - uint8_t r_ce_ctrl; - uint8_t r_ctrl0; - uint8_t r_timings; - uint8_t nregs_timings; - uint8_t conf_enable_w0; - uint8_t max_peripherals; - const AspeedSegments *segments; - hwaddr flash_window_base; - uint32_t flash_window_size; - uint32_t features; - hwaddr dma_flash_mask; - hwaddr dma_dram_mask; - uint32_t nregs; - uint32_t (*segment_to_reg)(const struct AspeedSMCState *s, - const AspeedSegments *seg); - void (*reg_to_segment)(const struct AspeedSMCState *s, uint32_t reg, - AspeedSegments *seg); - void (*dma_ctrl)(struct AspeedSMCState *s, uint32_t value); -} AspeedSMCController; -typedef struct AspeedSMCFlash { +#define TYPE_ASPEED_SMC_FLASH "aspeed.smc.flash" +OBJECT_DECLARE_SIMPLE_TYPE(AspeedSMCFlash, ASPEED_SMC_FLASH) +struct AspeedSMCFlash { + SysBusDevice parent_obj; + struct AspeedSMCState *controller; - - uint8_t id; - uint32_t size; + uint8_t cs; MemoryRegion mmio; - DeviceState *flash; -} AspeedSMCFlash; +}; #define TYPE_ASPEED_SMC "aspeed.smc" OBJECT_DECLARE_TYPE(AspeedSMCState, AspeedSMCClass, ASPEED_SMC) -struct AspeedSMCClass { - SysBusDevice parent_obj; - const AspeedSMCController *ctrl; -}; - #define ASPEED_SMC_R_MAX (0x100 / 4) +#define ASPEED_SMC_CS_MAX 5 struct AspeedSMCState { SysBusDevice parent_obj; - const AspeedSMCController *ctrl; - MemoryRegion mmio; + MemoryRegion mmio_flash_container; MemoryRegion mmio_flash; - MemoryRegion mmio_flash_alias; qemu_irq irq; - int irqline; uint32_t num_cs; qemu_irq *cs_lines; @@ -109,10 +76,41 @@ struct AspeedSMCState { MemoryRegion *dram_mr; AddressSpace dram_as; - AspeedSMCFlash *flashes; + AspeedSMCFlash flashes[ASPEED_SMC_CS_MAX]; uint8_t snoop_index; uint8_t snoop_dummies; }; +typedef struct AspeedSegments { + hwaddr addr; + uint32_t size; +} AspeedSegments; + +struct AspeedSMCClass { + SysBusDeviceClass parent_obj; + + uint8_t r_conf; + uint8_t r_ce_ctrl; + uint8_t r_ctrl0; + uint8_t r_timings; + uint8_t nregs_timings; + uint8_t conf_enable_w0; + uint8_t max_peripherals; + const uint32_t *resets; + const AspeedSegments *segments; + hwaddr flash_window_base; + uint32_t flash_window_size; + uint32_t features; + hwaddr dma_flash_mask; + hwaddr dma_dram_mask; + 
uint32_t nregs; + uint32_t (*segment_to_reg)(const AspeedSMCState *s, + const AspeedSegments *seg); + void (*reg_to_segment)(const AspeedSMCState *s, uint32_t reg, + AspeedSegments *seg); + void (*dma_ctrl)(AspeedSMCState *s, uint32_t value); + int (*addr_width)(const AspeedSMCState *s); +}; + #endif /* ASPEED_SMC_H */ diff --git a/include/hw/usb/msd.h b/include/hw/usb/msd.h index 7538c54569..54e9f38bda 100644 --- a/include/hw/usb/msd.h +++ b/include/hw/usb/msd.h @@ -17,7 +17,7 @@ enum USBMSDMode { USB_MSDM_CSW /* Command Status. */ }; -struct usb_msd_csw { +struct QEMU_PACKED usb_msd_csw { uint32_t sig; uint32_t tag; uint32_t residue; diff --git a/include/hw/virtio/vhost-user-rng.h b/include/hw/virtio/vhost-user-rng.h new file mode 100644 index 0000000000..071539996d --- /dev/null +++ b/include/hw/virtio/vhost-user-rng.h @@ -0,0 +1,33 @@ +/* + * Vhost-user RNG virtio device + * + * Copyright (c) 2021 Mathieu Poirier + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#ifndef _QEMU_VHOST_USER_RNG_H +#define _QEMU_VHOST_USER_RNG_H + +#include "hw/virtio/virtio.h" +#include "hw/virtio/vhost.h" +#include "hw/virtio/vhost-user.h" +#include "chardev/char-fe.h" + +#define TYPE_VHOST_USER_RNG "vhost-user-rng" +OBJECT_DECLARE_SIMPLE_TYPE(VHostUserRNG, VHOST_USER_RNG) + +struct VHostUserRNG { + /*< private >*/ + VirtIODevice parent; + CharBackend chardev; + struct vhost_virtqueue *vhost_vq; + struct vhost_dev vhost_dev; + VhostUserState vhost_user; + VirtQueue *req_vq; + bool connected; + + /*< public >*/ +}; + +#endif /* _QEMU_VHOST_USER_RNG_H */ diff --git a/include/hw/virtio/vhost-vdpa.h b/include/hw/virtio/vhost-vdpa.h index a8963da2d9..3ce79a646d 100644 --- a/include/hw/virtio/vhost-vdpa.h +++ b/include/hw/virtio/vhost-vdpa.h @@ -13,6 +13,7 @@ #define HW_VIRTIO_VHOST_VDPA_H #include "hw/virtio/virtio.h" +#include "standard-headers/linux/vhost_types.h" typedef struct VhostVDPAHostNotifier { MemoryRegion mr; @@ -21,9 +22,11 @@ typedef struct VhostVDPAHostNotifier { typedef struct vhost_vdpa { int device_fd; + int index; uint32_t msg_type; bool iotlb_batch_begin_sent; MemoryListener listener; + struct vhost_vdpa_iova_range iova_range; struct vhost_dev *dev; VhostVDPAHostNotifier notifier[VIRTIO_QUEUE_MAX]; } VhostVDPA; diff --git a/include/hw/virtio/vhost-vsock-common.h b/include/hw/virtio/vhost-vsock-common.h index e412b5ee98..d8b565b4da 100644 --- a/include/hw/virtio/vhost-vsock-common.h +++ b/include/hw/virtio/vhost-vsock-common.h @@ -35,6 +35,9 @@ struct VHostVSockCommon { VirtQueue *trans_vq; QEMUTimer *post_load_timer; + + /* features */ + OnOffAuto seqpacket; }; int vhost_vsock_common_start(VirtIODevice *vdev); @@ -43,5 +46,7 @@ int vhost_vsock_common_pre_save(void *opaque); int vhost_vsock_common_post_load(void *opaque, int version_id); void vhost_vsock_common_realize(VirtIODevice *vdev, const char *name); void vhost_vsock_common_unrealize(VirtIODevice *vdev); +uint64_t vhost_vsock_common_get_features(VirtIODevice *vdev, uint64_t features, + Error **errp); #endif /* _QEMU_VHOST_VSOCK_COMMON_H */ diff --git a/include/hw/virtio/vhost.h b/include/hw/virtio/vhost.h index 1a9fc65089..3fa0b554ef 100644 --- a/include/hw/virtio/vhost.h +++ b/include/hw/virtio/vhost.h @@ -74,6 +74,8 @@ struct vhost_dev { unsigned int nvqs; /* the first virtqueue which would be used by this vhost dev */ int vq_index; + /* the last vq index for the virtio device (not vhost) */ + int last_index; /* if non-zero, minimum required value for max_queues */ int num_queues; uint64_t features; diff --git 
a/include/hw/virtio/virtio-gpu.h b/include/hw/virtio/virtio-gpu.h index 24c6628944..acfba7c76c 100644 --- a/include/hw/virtio/virtio-gpu.h +++ b/include/hw/virtio/virtio-gpu.h @@ -187,7 +187,7 @@ struct VirtIOGPU { struct { QTAILQ_HEAD(, VGPUDMABuf) bufs; - VGPUDMABuf *primary; + VGPUDMABuf *primary[VIRTIO_GPU_MAX_SCANOUTS]; } dmabuf; }; @@ -273,7 +273,8 @@ void virtio_gpu_fini_udmabuf(struct virtio_gpu_simple_resource *res); int virtio_gpu_update_dmabuf(VirtIOGPU *g, uint32_t scanout_id, struct virtio_gpu_simple_resource *res, - struct virtio_gpu_framebuffer *fb); + struct virtio_gpu_framebuffer *fb, + struct virtio_gpu_rect *r); /* virtio-gpu-3d.c */ void virtio_gpu_virgl_process_cmd(VirtIOGPU *g, diff --git a/include/hw/virtio/virtio-iommu.h b/include/hw/virtio/virtio-iommu.h index 273e35c04b..e2339e5b72 100644 --- a/include/hw/virtio/virtio-iommu.h +++ b/include/hw/virtio/virtio-iommu.h @@ -26,7 +26,7 @@ #include "qom/object.h" #define TYPE_VIRTIO_IOMMU "virtio-iommu-device" -#define TYPE_VIRTIO_IOMMU_PCI "virtio-iommu-device-base" +#define TYPE_VIRTIO_IOMMU_PCI "virtio-iommu-pci" OBJECT_DECLARE_SIMPLE_TYPE(VirtIOIOMMU, VIRTIO_IOMMU) #define TYPE_VIRTIO_IOMMU_MEMORY_REGION "virtio-iommu-memory-region" diff --git a/include/hw/virtio/virtio-mem.h b/include/hw/virtio/virtio-mem.h index 9a6e348fa2..a5dd6a493b 100644 --- a/include/hw/virtio/virtio-mem.h +++ b/include/hw/virtio/virtio-mem.h @@ -65,9 +65,6 @@ struct VirtIOMEM { /* notifiers to notify when "size" changes */ NotifierList size_change_notifiers; - /* don't migrate unplugged memory */ - NotifierWithReturn precopy_notifier; - /* listeners to notify on plug/unplug activity. */ QLIST_HEAD(, RamDiscardListener) rdl_list; }; diff --git a/include/hw/virtio/virtio-net.h b/include/hw/virtio/virtio-net.h index 824a69c23f..eb87032627 100644 --- a/include/hw/virtio/virtio-net.h +++ b/include/hw/virtio/virtio-net.h @@ -194,8 +194,9 @@ struct VirtIONet { NICConf nic_conf; DeviceState *qdev; int multiqueue; - uint16_t max_queues; - uint16_t curr_queues; + uint16_t max_queue_pairs; + uint16_t curr_queue_pairs; + uint16_t max_ncs; size_t config_size; char *netclient_name; char *netclient_type; @@ -209,6 +210,8 @@ struct VirtIONet { bool failover_primary_hidden; bool failover; DeviceListener primary_listener; + QDict *primary_opts; + bool primary_opts_from_json; Notifier migration_state; VirtioNetRssData rss_data; struct NetRxPkt *rx_pkt; diff --git a/include/libdecnumber/decNumber.h b/include/libdecnumber/decNumber.h index aa115fed07..41bc2a0d36 100644 --- a/include/libdecnumber/decNumber.h +++ b/include/libdecnumber/decNumber.h @@ -116,12 +116,16 @@ decNumber * decNumberFromUInt32(decNumber *, uint32_t); decNumber *decNumberFromInt64(decNumber *, int64_t); decNumber *decNumberFromUInt64(decNumber *, uint64_t); + decNumber *decNumberFromInt128(decNumber *, uint64_t, int64_t); + decNumber *decNumberFromUInt128(decNumber *, uint64_t, uint64_t); decNumber * decNumberFromString(decNumber *, const char *, decContext *); char * decNumberToString(const decNumber *, char *); char * decNumberToEngString(const decNumber *, char *); uint32_t decNumberToUInt32(const decNumber *, decContext *); int32_t decNumberToInt32(const decNumber *, decContext *); int64_t decNumberIntegralToInt64(const decNumber *dn, decContext *set); + void decNumberIntegralToInt128(const decNumber *dn, decContext *set, + uint64_t *plow, uint64_t *phigh); uint8_t * decNumberGetBCD(const decNumber *, uint8_t *); decNumber * decNumberSetBCD(decNumber *, const uint8_t *, uint32_t); diff 
--git a/include/libdecnumber/decNumberLocal.h b/include/libdecnumber/decNumberLocal.h index 4d53c077f2..6198ca8593 100644 --- a/include/libdecnumber/decNumberLocal.h +++ b/include/libdecnumber/decNumberLocal.h @@ -98,7 +98,7 @@ /* Shared lookup tables */ extern const uByte DECSTICKYTAB[10]; /* re-round digits if sticky */ - extern const uLong DECPOWERS[19]; /* powers of ten table */ + extern const uLong DECPOWERS[20]; /* powers of ten table */ /* The following are included from decDPD.h */ extern const uShort DPD2BIN[1024]; /* DPD -> 0-999 */ extern const uShort BIN2DPD[1000]; /* 0-999 -> DPD */ diff --git a/include/migration/blocker.h b/include/migration/blocker.h index acd27018e9..9cebe2ba06 100644 --- a/include/migration/blocker.h +++ b/include/migration/blocker.h @@ -25,6 +25,22 @@ */ int migrate_add_blocker(Error *reason, Error **errp); +/** + * @migrate_add_blocker_internal - prevent migration from proceeding without + * only-migrate implications + * + * @reason - an error to be returned whenever migration is attempted + * + * @errp - [out] The reason (if any) we cannot block migration right now. + * + * @returns - 0 on success, -EBUSY on failure, with errp set. + * + * Some of the migration blockers can be temporary (e.g., for a few seconds), + * so it shouldn't need to conflict with "-only-migratable". For those cases, + * we can call this function rather than @migrate_add_blocker(). + */ +int migrate_add_blocker_internal(Error *reason, Error **errp); + /** * @migrate_del_blocker - remove a blocking error from migration * diff --git a/include/monitor/hmp-target.h b/include/monitor/hmp-target.h index 60fc92722a..ffdc15a34b 100644 --- a/include/monitor/hmp-target.h +++ b/include/monitor/hmp-target.h @@ -48,6 +48,7 @@ void hmp_info_mem(Monitor *mon, const QDict *qdict); void hmp_info_tlb(Monitor *mon, const QDict *qdict); void hmp_mce(Monitor *mon, const QDict *qdict); void hmp_info_local_apic(Monitor *mon, const QDict *qdict); -void hmp_info_io_apic(Monitor *mon, const QDict *qdict); +void hmp_info_sev(Monitor *mon, const QDict *qdict); +void hmp_info_sgx(Monitor *mon, const QDict *qdict); #endif /* MONITOR_HMP_TARGET_H */ diff --git a/include/monitor/hmp.h b/include/monitor/hmp.h index 3baa1058e2..96d014826a 100644 --- a/include/monitor/hmp.h +++ b/include/monitor/hmp.h @@ -15,8 +15,9 @@ #define HMP_H #include "qemu/readline.h" +#include "qapi/qapi-types-common.h" -void hmp_handle_error(Monitor *mon, Error *err); +bool hmp_handle_error(Monitor *mon, Error *err); void hmp_info_name(Monitor *mon, const QDict *qdict); void hmp_info_version(Monitor *mon, const QDict *qdict); @@ -124,12 +125,13 @@ void hmp_info_ramblock(Monitor *mon, const QDict *qdict); void hmp_hotpluggable_cpus(Monitor *mon, const QDict *qdict); void hmp_info_vm_generation_id(Monitor *mon, const QDict *qdict); void hmp_info_memory_size_summary(Monitor *mon, const QDict *qdict); -void hmp_info_sev(Monitor *mon, const QDict *qdict); void hmp_info_replay(Monitor *mon, const QDict *qdict); void hmp_replay_break(Monitor *mon, const QDict *qdict); void hmp_replay_delete_break(Monitor *mon, const QDict *qdict); void hmp_replay_seek(Monitor *mon, const QDict *qdict); void hmp_info_dirty_rate(Monitor *mon, const QDict *qdict); void hmp_calc_dirty_rate(Monitor *mon, const QDict *qdict); +void hmp_human_readable_text_helper(Monitor *mon, + HumanReadableText *(*qmp_handler)(Error **)); #endif diff --git a/include/monitor/monitor.h b/include/monitor/monitor.h index 1a8a369b50..12d395d62d 100644 --- a/include/monitor/monitor.h +++ 
b/include/monitor/monitor.h @@ -53,5 +53,7 @@ int64_t monitor_fdset_dup_fd_find(int dup_fd); void monitor_register_hmp(const char *name, bool info, void (*cmd)(Monitor *mon, const QDict *qdict)); +void monitor_register_hmp_info_hrt(const char *name, + HumanReadableText *(*handler)(Error **errp)); #endif /* MONITOR_H */ diff --git a/include/monitor/qdev.h b/include/monitor/qdev.h index eaa947d73a..1d57bf6577 100644 --- a/include/monitor/qdev.h +++ b/include/monitor/qdev.h @@ -9,6 +9,31 @@ void qmp_device_add(QDict *qdict, QObject **ret_data, Error **errp); int qdev_device_help(QemuOpts *opts); DeviceState *qdev_device_add(QemuOpts *opts, Error **errp); -void qdev_set_id(DeviceState *dev, const char *id); +DeviceState *qdev_device_add_from_qdict(const QDict *opts, + bool from_json, Error **errp); + +/** + * qdev_set_id: parent the device and set its id if provided. + * @dev: device to handle + * @id: id to be given to the device, or NULL. + * + * Returns: the id of the device in case of success; otherwise NULL. + * + * @dev must be unrealized, unparented and must not have an id. + * + * If @id is non-NULL, this function tries to setup @dev qom path as + * "/peripheral/id". If @id is already taken, it fails. If it succeeds, + * the id field of @dev is set to @id (@dev now owns the given @id + * parameter). + * + * If @id is NULL, this function generates a unique name and setups @dev + * qom path as "/peripheral-anon/name". This name is not set as the id + * of @dev. + * + * Upon success, it returns the id/name (generated or provided). The + * returned string is owned by the corresponding child property and must + * not be freed by the caller. + */ +const char *qdev_set_id(DeviceState *dev, char *id, Error **errp); #endif diff --git a/include/net/net.h b/include/net/net.h index 5d1508081f..523136c7ac 100644 --- a/include/net/net.h +++ b/include/net/net.h @@ -62,6 +62,7 @@ typedef struct SocketReadState SocketReadState; typedef void (SocketReadStateFinalize)(SocketReadState *rs); typedef void (NetAnnounce)(NetClientState *); typedef bool (SetSteeringEBPF)(NetClientState *, int); +typedef bool (NetCheckPeerType)(NetClientState *, ObjectClass *, Error **); typedef struct NetClientInfo { NetClientDriver type; @@ -84,6 +85,7 @@ typedef struct NetClientInfo { SetVnetBE *set_vnet_be; NetAnnounce *announce; SetSteeringEBPF *set_steering_ebpf; + NetCheckPeerType *check_peer_type; } NetClientInfo; struct NetClientState { @@ -103,6 +105,7 @@ struct NetClientState { int vnet_hdr_len; bool is_netdev; bool do_not_pad; /* do not pad to the minimum ethernet frame length */ + bool is_datapath; QTAILQ_HEAD(, NetFilterState) filters; }; @@ -134,6 +137,10 @@ NetClientState *qemu_new_net_client(NetClientInfo *info, NetClientState *peer, const char *model, const char *name); +NetClientState *qemu_new_net_control_client(NetClientInfo *info, + NetClientState *peer, + const char *model, + const char *name); NICState *qemu_new_nic(NetClientInfo *info, NICConf *conf, const char *model, diff --git a/include/net/vhost_net.h b/include/net/vhost_net.h index fba40cf695..387e913e4e 100644 --- a/include/net/vhost_net.h +++ b/include/net/vhost_net.h @@ -21,8 +21,10 @@ typedef struct VhostNetOptions { uint64_t vhost_net_get_max_queues(VHostNetState *net); struct vhost_net *vhost_net_init(VhostNetOptions *options); -int vhost_net_start(VirtIODevice *dev, NetClientState *ncs, int total_queues); -void vhost_net_stop(VirtIODevice *dev, NetClientState *ncs, int total_queues); +int vhost_net_start(VirtIODevice *dev, NetClientState 
*ncs, + int data_queue_pairs, int cvq); +void vhost_net_stop(VirtIODevice *dev, NetClientState *ncs, + int data_queue_pairs, int cvq); void vhost_net_cleanup(VHostNetState *net); diff --git a/include/qapi/compat-policy.h b/include/qapi/compat-policy.h index 1083f95122..8b7b25c0b5 100644 --- a/include/qapi/compat-policy.h +++ b/include/qapi/compat-policy.h @@ -13,10 +13,17 @@ #ifndef QAPI_COMPAT_POLICY_H #define QAPI_COMPAT_POLICY_H +#include "qapi/error.h" #include "qapi/qapi-types-compat.h" extern CompatPolicy compat_policy; +bool compat_policy_input_ok(unsigned special_features, + const CompatPolicy *policy, + ErrorClass error_class, + const char *kind, const char *name, + Error **errp); + /* * Create a QObject input visitor for @obj for use with QMP * diff --git a/include/qapi/qmp/dispatch.h b/include/qapi/qmp/dispatch.h index 075203dc67..1e4240fd0d 100644 --- a/include/qapi/qmp/dispatch.h +++ b/include/qapi/qmp/dispatch.h @@ -21,12 +21,10 @@ typedef void (QmpCommandFunc)(QDict *, QObject **, Error **); typedef enum QmpCommandOptions { - QCO_NO_OPTIONS = 0x0, QCO_NO_SUCCESS_RESP = (1U << 0), QCO_ALLOW_OOB = (1U << 1), QCO_ALLOW_PRECONFIG = (1U << 2), QCO_COROUTINE = (1U << 3), - QCO_DEPRECATED = (1U << 4), } QmpCommandOptions; typedef struct QmpCommand @@ -35,6 +33,7 @@ typedef struct QmpCommand /* Runs in coroutine context if QCO_COROUTINE is set */ QmpCommandFunc *fn; QmpCommandOptions options; + unsigned special_features; QTAILQ_ENTRY(QmpCommand) node; bool enabled; const char *disable_reason; @@ -43,7 +42,8 @@ typedef struct QmpCommand typedef QTAILQ_HEAD(QmpCommandList, QmpCommand) QmpCommandList; void qmp_register_command(QmpCommandList *cmds, const char *name, - QmpCommandFunc *fn, QmpCommandOptions options); + QmpCommandFunc *fn, QmpCommandOptions options, + unsigned special_features); const QmpCommand *qmp_find_command(const QmpCommandList *cmds, const char *name); void qmp_disable_command(QmpCommandList *cmds, const char *name, diff --git a/include/qapi/qobject-input-visitor.h b/include/qapi/qobject-input-visitor.h index 8d69388810..95985e25e5 100644 --- a/include/qapi/qobject-input-visitor.h +++ b/include/qapi/qobject-input-visitor.h @@ -15,7 +15,6 @@ #ifndef QOBJECT_INPUT_VISITOR_H #define QOBJECT_INPUT_VISITOR_H -#include "qapi/qapi-types-compat.h" #include "qapi/visitor.h" typedef struct QObjectInputVisitor QObjectInputVisitor; @@ -59,9 +58,6 @@ typedef struct QObjectInputVisitor QObjectInputVisitor; */ Visitor *qobject_input_visitor_new(QObject *obj); -void qobject_input_visitor_set_policy(Visitor *v, - CompatPolicyInput deprecated); - /* * Create a QObject input visitor for @obj for use with keyval_parse() * diff --git a/include/qapi/qobject-output-visitor.h b/include/qapi/qobject-output-visitor.h index f2a2f92a00..2b1726baf5 100644 --- a/include/qapi/qobject-output-visitor.h +++ b/include/qapi/qobject-output-visitor.h @@ -15,7 +15,6 @@ #define QOBJECT_OUTPUT_VISITOR_H #include "qapi/visitor.h" -#include "qapi/qapi-types-compat.h" typedef struct QObjectOutputVisitor QObjectOutputVisitor; @@ -54,7 +53,4 @@ typedef struct QObjectOutputVisitor QObjectOutputVisitor; */ Visitor *qobject_output_visitor_new(QObject **result); -void qobject_output_visitor_set_policy(Visitor *v, - CompatPolicyOutput deprecated); - #endif diff --git a/include/qapi/type-helpers.h b/include/qapi/type-helpers.h new file mode 100644 index 0000000000..be1f181526 --- /dev/null +++ b/include/qapi/type-helpers.h @@ -0,0 +1,14 @@ +/* + * QAPI common helper functions + * + * This file provides helper 
functions related to types defined + * in the QAPI schema. + * + * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. + * See the COPYING.LIB file in the top-level directory. + * + */ + +#include "qapi/qapi-types-common.h" + +HumanReadableText *human_readable_text_from_str(GString *str); diff --git a/include/qapi/util.h b/include/qapi/util.h index d7bfb30e25..81a2b13a33 100644 --- a/include/qapi/util.h +++ b/include/qapi/util.h @@ -11,9 +11,15 @@ #ifndef QAPI_UTIL_H #define QAPI_UTIL_H +typedef enum { + QAPI_DEPRECATED, + QAPI_UNSTABLE, +} QapiSpecialFeature; + typedef struct QEnumLookup { const char *const *array; - int size; + const unsigned char *const special_features; + const int size; } QEnumLookup; const char *qapi_enum_lookup(const QEnumLookup *lookup, int val); diff --git a/include/qapi/visitor-impl.h b/include/qapi/visitor-impl.h index 3b950f6e3d..2badec5ba4 100644 --- a/include/qapi/visitor-impl.h +++ b/include/qapi/visitor-impl.h @@ -114,14 +114,19 @@ struct Visitor void (*optional)(Visitor *v, const char *name, bool *present); /* Optional */ - bool (*deprecated_accept)(Visitor *v, const char *name, Error **errp); + bool (*policy_reject)(Visitor *v, const char *name, + unsigned special_features, Error **errp); /* Optional */ - bool (*deprecated)(Visitor *v, const char *name); + bool (*policy_skip)(Visitor *v, const char *name, + unsigned special_features); /* Must be set */ VisitorType type; + /* Optional */ + struct CompatPolicy compat_policy; + /* Must be set for output visitors, optional otherwise. */ void (*complete)(Visitor *v, void *opaque); diff --git a/include/qapi/visitor.h b/include/qapi/visitor.h index b3c9ef7a81..d53a84c9ba 100644 --- a/include/qapi/visitor.h +++ b/include/qapi/visitor.h @@ -16,6 +16,7 @@ #define QAPI_VISITOR_H #include "qapi/qapi-builtin-types.h" +#include "qapi/qapi-types-compat.h" /* * The QAPI schema defines both a set of C data types, and a QMP wire @@ -460,22 +461,39 @@ void visit_end_alternate(Visitor *v, void **obj); bool visit_optional(Visitor *v, const char *name, bool *present); /* - * Should we reject deprecated member @name? + * Should we reject member @name due to policy? + * + * @special_features is the member's special features encoded as a + * bitset of QapiSpecialFeature. * * @name must not be NULL. This function is only useful between * visit_start_struct() and visit_end_struct(), since only objects * have deprecated members. */ -bool visit_deprecated_accept(Visitor *v, const char *name, Error **errp); +bool visit_policy_reject(Visitor *v, const char *name, + unsigned special_features, Error **errp); /* - * Should we visit deprecated member @name? + * + * Should we skip member @name due to policy? + * + * @special_features is the member's special features encoded as a + * bitset of QapiSpecialFeature. * * @name must not be NULL. This function is only useful between * visit_start_struct() and visit_end_struct(), since only objects * have deprecated members. */ -bool visit_deprecated(Visitor *v, const char *name); +bool visit_policy_skip(Visitor *v, const char *name, + unsigned special_features); + +/* + * Set policy for handling deprecated management interfaces. + * + * Intended use: call visit_set_policy(v, &compat_policy) when + * visiting management interface input or output. + */ +void visit_set_policy(Visitor *v, CompatPolicy *policy); /* * Visit an enum value. 
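
Editor's note: the visit_policy_reject()/visit_policy_skip() pair above generalises the old
deprecated-only hooks to any special feature declared in the schema. As a rough, hand-written
sketch (the real callers are emitted by the QAPI code generator; the "Example" struct and the
"old-name" member below are made up for illustration, and the includes are approximate), a
visit function for an object with one deprecated member could consult the policy like this,
using the QAPI_DEPRECATED bit from the QapiSpecialFeature enum added in qapi/util.h:

    #include "qapi/visitor.h"
    #include "qapi/util.h"
    #include "qapi/qapi-builtin-visit.h"

    typedef struct Example {      /* hypothetical QAPI-generated struct */
        char *old_name;           /* member marked 'deprecated' in the schema */
    } Example;

    static bool visit_type_Example_members(Visitor *v, Example *obj, Error **errp)
    {
        /* Reject the deprecated member outright if the policy says so... */
        if (visit_policy_reject(v, "old-name", 1u << QAPI_DEPRECATED, errp)) {
            return false;
        }
        /* ...otherwise either skip it silently or visit it as usual. */
        if (!visit_policy_skip(v, "old-name", 1u << QAPI_DEPRECATED)) {
            if (!visit_type_str(v, "old-name", &obj->old_name, errp)) {
                return false;
            }
        }
        return true;
    }

A management-layer caller would previously have applied visit_set_policy(v, &compat_policy)
to the visitor, so that both helpers can consult the configured handling of deprecated
input and output.
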
diff --git a/include/qemu/host-utils.h b/include/qemu/host-utils.h index 711b221704..ca979dc6cc 100644 --- a/include/qemu/host-utils.h +++ b/include/qemu/host-utils.h @@ -23,6 +23,10 @@ * THE SOFTWARE. */ +/* Portions of this work are licensed under the terms of the GNU GPL, + * version 2 or later. See the COPYING file in the top-level directory. + */ + #ifndef HOST_UTILS_H #define HOST_UTILS_H @@ -52,36 +56,32 @@ static inline uint64_t muldiv64(uint64_t a, uint32_t b, uint32_t c) return (__int128_t)a * b / c; } -static inline int divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor) +static inline uint64_t divu128(uint64_t *plow, uint64_t *phigh, + uint64_t divisor) { - if (divisor == 0) { - return 1; - } else { - __uint128_t dividend = ((__uint128_t)*phigh << 64) | *plow; - __uint128_t result = dividend / divisor; - *plow = result; - *phigh = dividend % divisor; - return result > UINT64_MAX; - } + __uint128_t dividend = ((__uint128_t)*phigh << 64) | *plow; + __uint128_t result = dividend / divisor; + + *plow = result; + *phigh = result >> 64; + return dividend % divisor; } -static inline int divs128(int64_t *plow, int64_t *phigh, int64_t divisor) +static inline int64_t divs128(uint64_t *plow, int64_t *phigh, + int64_t divisor) { - if (divisor == 0) { - return 1; - } else { - __int128_t dividend = ((__int128_t)*phigh << 64) | *plow; - __int128_t result = dividend / divisor; - *plow = result; - *phigh = dividend % divisor; - return result != *plow; - } + __int128_t dividend = ((__int128_t)*phigh << 64) | *plow; + __int128_t result = dividend / divisor; + + *plow = result; + *phigh = result >> 64; + return dividend % divisor; } #else void muls64(uint64_t *plow, uint64_t *phigh, int64_t a, int64_t b); void mulu64(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b); -int divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor); -int divs128(int64_t *plow, int64_t *phigh, int64_t divisor); +uint64_t divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor); +int64_t divs128(uint64_t *plow, int64_t *phigh, int64_t divisor); static inline uint64_t muldiv64(uint64_t a, uint32_t b, uint32_t c) { @@ -357,6 +357,14 @@ static inline uint64_t revbit64(uint64_t x) #endif } +/** + * Return the absolute value of a 64-bit integer as an unsigned 64-bit value + */ +static inline uint64_t uabs64(int64_t v) +{ + return v < 0 ? -v : v; +} + /** * sadd32_overflow - addition with overflow indication * @x, @y: addends @@ -582,6 +590,42 @@ static inline bool umul64_overflow(uint64_t x, uint64_t y, uint64_t *ret) #endif } +/* + * Unsigned 128x64 multiplication. + * Returns true if the result got truncated to 128 bits. + * Otherwise, returns false and the multiplication result via plow and phigh. 
+ */ +static inline bool mulu128(uint64_t *plow, uint64_t *phigh, uint64_t factor) +{ +#if defined(CONFIG_INT128) && \ + (__has_builtin(__builtin_mul_overflow) || __GNUC__ >= 5) + bool res; + __uint128_t r; + __uint128_t f = ((__uint128_t)*phigh << 64) | *plow; + res = __builtin_mul_overflow(f, factor, &r); + + *plow = r; + *phigh = r >> 64; + + return res; +#else + uint64_t dhi = *phigh; + uint64_t dlo = *plow; + uint64_t ahi; + uint64_t blo, bhi; + + if (dhi == 0) { + mulu64(plow, phigh, dlo, factor); + return false; + } + + mulu64(plow, &ahi, dlo, factor); + mulu64(&blo, &bhi, dhi, factor); + + return uadd64_overflow(ahi, blo, phigh) || bhi != 0; +#endif +} + /** * uadd64_carry - addition with carry-in and carry-out * @x, @y: addends @@ -728,4 +772,81 @@ void urshift(uint64_t *plow, uint64_t *phigh, int32_t shift); */ void ulshift(uint64_t *plow, uint64_t *phigh, int32_t shift, bool *overflow); +/* From the GNU Multi Precision Library - longlong.h __udiv_qrnnd + * (https://gmplib.org/repo/gmp/file/tip/longlong.h) + * + * Licensed under the GPLv2/LGPLv3 + */ +static inline uint64_t udiv_qrnnd(uint64_t *r, uint64_t n1, + uint64_t n0, uint64_t d) +{ +#if defined(__x86_64__) + uint64_t q; + asm("divq %4" : "=a"(q), "=d"(*r) : "0"(n0), "1"(n1), "rm"(d)); + return q; +#elif defined(__s390x__) && !defined(__clang__) + /* Need to use a TImode type to get an even register pair for DLGR. */ + unsigned __int128 n = (unsigned __int128)n1 << 64 | n0; + asm("dlgr %0, %1" : "+r"(n) : "r"(d)); + *r = n >> 64; + return n; +#elif defined(_ARCH_PPC64) && defined(_ARCH_PWR7) + /* From Power ISA 2.06, programming note for divdeu. */ + uint64_t q1, q2, Q, r1, r2, R; + asm("divdeu %0,%2,%4; divdu %1,%3,%4" + : "=&r"(q1), "=r"(q2) + : "r"(n1), "r"(n0), "r"(d)); + r1 = -(q1 * d); /* low part of (n1<<64) - (q1 * d) */ + r2 = n0 - (q2 * d); + Q = q1 + q2; + R = r1 + r2; + if (R >= d || R < r2) { /* overflow implies R > d */ + Q += 1; + R -= d; + } + *r = R; + return Q; +#else + uint64_t d0, d1, q0, q1, r1, r0, m; + + d0 = (uint32_t)d; + d1 = d >> 32; + + r1 = n1 % d1; + q1 = n1 / d1; + m = q1 * d0; + r1 = (r1 << 32) | (n0 >> 32); + if (r1 < m) { + q1 -= 1; + r1 += d; + if (r1 >= d) { + if (r1 < m) { + q1 -= 1; + r1 += d; + } + } + } + r1 -= m; + + r0 = r1 % d1; + q0 = r1 / d1; + m = q0 * d0; + r0 = (r0 << 32) | (uint32_t)n0; + if (r0 < m) { + q0 -= 1; + r0 += d; + if (r0 >= d) { + if (r0 < m) { + q0 -= 1; + r0 += d; + } + } + } + r0 -= m; + + *r = r0; + return (q1 << 32) | q0; +#endif +} + #endif diff --git a/include/qemu/int128.h b/include/qemu/int128.h index 2ac0746426..b6d517aea4 100644 --- a/include/qemu/int128.h +++ b/include/qemu/int128.h @@ -58,6 +58,11 @@ static inline Int128 int128_exts64(int64_t a) return a; } +static inline Int128 int128_not(Int128 a) +{ + return ~a; +} + static inline Int128 int128_and(Int128 a, Int128 b) { return a & b; @@ -68,6 +73,11 @@ static inline Int128 int128_or(Int128 a, Int128 b) return a | b; } +static inline Int128 int128_xor(Int128 a, Int128 b) +{ + return a ^ b; +} + static inline Int128 int128_rshift(Int128 a, int n) { return a >> n; @@ -235,6 +245,11 @@ static inline Int128 int128_exts64(int64_t a) return int128_make128(a, (a < 0) ? 
-1 : 0); } +static inline Int128 int128_not(Int128 a) +{ + return int128_make128(~a.lo, ~a.hi); +} + static inline Int128 int128_and(Int128 a, Int128 b) { return int128_make128(a.lo & b.lo, a.hi & b.hi); @@ -245,6 +260,11 @@ static inline Int128 int128_or(Int128 a, Int128 b) return int128_make128(a.lo | b.lo, a.hi | b.hi); } +static inline Int128 int128_xor(Int128 a, Int128 b) +{ + return int128_make128(a.lo ^ b.lo, a.hi ^ b.hi); +} + static inline Int128 int128_rshift(Int128 a, int n) { int64_t h; diff --git a/include/qemu/iova-tree.h b/include/qemu/iova-tree.h index b66cf93c4b..8249edd764 100644 --- a/include/qemu/iova-tree.h +++ b/include/qemu/iova-tree.h @@ -59,7 +59,7 @@ IOVATree *iova_tree_new(void); * * Return: 0 if succeeded, or <0 if error. */ -int iova_tree_insert(IOVATree *tree, DMAMap *map); +int iova_tree_insert(IOVATree *tree, const DMAMap *map); /** * iova_tree_remove: @@ -74,7 +74,7 @@ int iova_tree_insert(IOVATree *tree, DMAMap *map); * * Return: 0 if succeeded, or <0 if error. */ -int iova_tree_remove(IOVATree *tree, DMAMap *map); +int iova_tree_remove(IOVATree *tree, const DMAMap *map); /** * iova_tree_find: @@ -92,7 +92,7 @@ int iova_tree_remove(IOVATree *tree, DMAMap *map); * user is responsible to make sure the pointer is valid (say, no * concurrent deletion in progress). */ -DMAMap *iova_tree_find(IOVATree *tree, DMAMap *map); +const DMAMap *iova_tree_find(const IOVATree *tree, const DMAMap *map); /** * iova_tree_find_address: @@ -105,7 +105,7 @@ DMAMap *iova_tree_find(IOVATree *tree, DMAMap *map); * * Return: same as iova_tree_find(). */ -DMAMap *iova_tree_find_address(IOVATree *tree, hwaddr iova); +const DMAMap *iova_tree_find_address(const IOVATree *tree, hwaddr iova); /** * iova_tree_foreach: diff --git a/include/qemu/job.h b/include/qemu/job.h index 41162ed494..6e67b6977f 100644 --- a/include/qemu/job.h +++ b/include/qemu/job.h @@ -253,8 +253,17 @@ struct JobDriver { /** * If the callback is not NULL, it will be invoked in job_cancel_async + * + * This function must return true if the job will be cancelled + * immediately without any further I/O (mandatory if @force is + * true), and false otherwise. This lets the generic job layer + * know whether a job has been truly (force-)cancelled, or whether + * it is just in a special completion mode (like mirror after + * READY). + * (If the callback is NULL, the job is assumed to terminate + * without I/O.) */ - void (*cancel)(Job *job, bool force); + bool (*cancel)(Job *job, bool force); /** Called when the job is freed */ @@ -427,9 +436,15 @@ const char *job_type_str(const Job *job); /** Returns true if the job should not be visible to the management layer. */ bool job_is_internal(Job *job); -/** Returns whether the job is scheduled for cancellation. */ +/** Returns whether the job is being cancelled. */ bool job_is_cancelled(Job *job); +/** + * Returns whether the job is scheduled for cancellation (at an + * indefinite point). + */ +bool job_cancel_requested(Job *job); + /** Returns whether the job is in a completed state. */ bool job_is_completed(Job *job); @@ -506,18 +521,18 @@ void job_user_cancel(Job *job, bool force, Error **errp); /** * Synchronously cancel the @job. The completion callback is called - * before the function returns. The job may actually complete - * instead of canceling itself; the circumstances under which this - * happens depend on the kind of job that is active. + * before the function returns. 
If @force is false, the job may + * actually complete instead of canceling itself; the circumstances + * under which this happens depend on the kind of job that is active. * * Returns the return value from the job if the job actually completed * during the call, or -ECANCELED if it was canceled. * * Callers must hold the AioContext lock of job->aio_context. */ -int job_cancel_sync(Job *job); +int job_cancel_sync(Job *job, bool force); -/** Synchronously cancels all jobs using job_cancel_sync(). */ +/** Synchronously force-cancels all jobs using job_cancel_sync(). */ void job_cancel_sync_all(void); /** diff --git a/include/qemu/module.h b/include/qemu/module.h index 3deac0078b..5fcc323b2a 100644 --- a/include/qemu/module.h +++ b/include/qemu/module.h @@ -77,14 +77,14 @@ void module_allow_arch(const char *arch); /** * DOC: module info annotation macros * - * `scripts/modinfo-collect.py` will collect module info, + * ``scripts/modinfo-collect.py`` will collect module info, * using the preprocessor and -DQEMU_MODINFO. * - * `scripts/modinfo-generate.py` will create a module meta-data database + * ``scripts/modinfo-generate.py`` will create a module meta-data database * from the collected information so qemu knows about module * dependencies and QOM objects implemented by modules. * - * See `*.modinfo` and `modinfo.c` in the build directory to check the + * See ``*.modinfo`` and ``modinfo.c`` in the build directory to check the * script results. */ #ifdef QEMU_MODINFO diff --git a/include/qemu/plugin.h b/include/qemu/plugin.h index 9a8438f683..145f8a221a 100644 --- a/include/qemu/plugin.h +++ b/include/qemu/plugin.h @@ -12,6 +12,7 @@ #include "qemu/error-report.h" #include "qemu/queue.h" #include "qemu/option.h" +#include "exec/memopidx.h" /* * Events that plugins can subscribe to. @@ -36,6 +37,25 @@ enum qemu_plugin_event { struct qemu_plugin_desc; typedef QTAILQ_HEAD(, qemu_plugin_desc) QemuPluginList; +/* + * Construct a qemu_plugin_meminfo_t. + */ +static inline qemu_plugin_meminfo_t +make_plugin_meminfo(MemOpIdx oi, enum qemu_plugin_mem_rw rw) +{ + return oi | (rw << 16); +} + +/* + * Extract the memory operation direction from a qemu_plugin_meminfo_t. + * Other portions may be extracted via get_memop and get_mmuidx. + */ +static inline enum qemu_plugin_mem_rw +get_plugin_meminfo_rw(qemu_plugin_meminfo_t i) +{ + return i >> 16; +} + #ifdef CONFIG_PLUGIN extern QemuOptsList qemu_plugin_opts; @@ -143,10 +163,12 @@ struct qemu_plugin_tb { /** * qemu_plugin_tb_insn_get(): get next plugin record for translation. 
- * + * @tb: the internal tb context + * @pc: address of instruction */ static inline -struct qemu_plugin_insn *qemu_plugin_tb_insn_get(struct qemu_plugin_tb *tb) +struct qemu_plugin_insn *qemu_plugin_tb_insn_get(struct qemu_plugin_tb *tb, + uint64_t pc) { struct qemu_plugin_insn *insn; int i, j; @@ -159,6 +181,7 @@ struct qemu_plugin_insn *qemu_plugin_tb_insn_get(struct qemu_plugin_tb *tb) g_byte_array_set_size(insn->data, 0); insn->calls_helpers = false; insn->mem_helper = false; + insn->vaddr = pc; for (i = 0; i < PLUGIN_N_CB_TYPES; i++) { for (j = 0; j < PLUGIN_N_CB_SUBTYPES; j++) { @@ -180,7 +203,8 @@ qemu_plugin_vcpu_syscall(CPUState *cpu, int64_t num, uint64_t a1, uint64_t a6, uint64_t a7, uint64_t a8); void qemu_plugin_vcpu_syscall_ret(CPUState *cpu, int64_t num, int64_t ret); -void qemu_plugin_vcpu_mem_cb(CPUState *cpu, uint64_t vaddr, uint32_t meminfo); +void qemu_plugin_vcpu_mem_cb(CPUState *cpu, uint64_t vaddr, + MemOpIdx oi, enum qemu_plugin_mem_rw rw); void qemu_plugin_flush_cb(void); @@ -244,7 +268,8 @@ void qemu_plugin_vcpu_syscall_ret(CPUState *cpu, int64_t num, int64_t ret) { } static inline void qemu_plugin_vcpu_mem_cb(CPUState *cpu, uint64_t vaddr, - uint32_t meminfo) + MemOpIdx oi, + enum qemu_plugin_mem_rw rw) { } static inline void qemu_plugin_flush_cb(void) diff --git a/include/qemu/rcu.h b/include/qemu/rcu.h index 515d327cf1..e69efbd47f 100644 --- a/include/qemu/rcu.h +++ b/include/qemu/rcu.h @@ -27,6 +27,7 @@ #include "qemu/thread.h" #include "qemu/queue.h" #include "qemu/atomic.h" +#include "qemu/notify.h" #include "qemu/sys_membarrier.h" #ifdef __cplusplus @@ -66,6 +67,13 @@ struct rcu_reader_data { /* Data used for registry, protected by rcu_registry_lock */ QLIST_ENTRY(rcu_reader_data) node; + + /* + * NotifierList used to force an RCU grace period. Accessed under + * rcu_registry_lock. Note that the notifier is called _outside_ + * the thread! + */ + NotifierList force_rcu; }; extern __thread struct rcu_reader_data rcu_reader; @@ -180,6 +188,13 @@ G_DEFINE_AUTOPTR_CLEANUP_FUNC(RCUReadAuto, rcu_read_auto_unlock) #define RCU_READ_LOCK_GUARD() \ g_autoptr(RCUReadAuto) _rcu_read_auto __attribute__((unused)) = rcu_read_auto_lock() +/* + * Force-RCU notifiers tell readers that they should exit their + * read-side critical section. + */ +void rcu_add_force_rcu_notifier(Notifier *n); +void rcu_remove_force_rcu_notifier(Notifier *n); + #ifdef __cplusplus } #endif diff --git a/include/qom/object.h b/include/qom/object.h index faae0d841f..fae096f51c 100644 --- a/include/qom/object.h +++ b/include/qom/object.h @@ -1543,6 +1543,18 @@ Object *object_resolve_path(const char *path, bool *ambiguous); Object *object_resolve_path_type(const char *path, const char *typename, bool *ambiguous); +/** + * object_resolve_path_at: + * @parent: the object in which to resolve the path + * @path: the path to resolve + * + * This is like object_resolve_path(), except paths not starting with + * a slash are relative to @parent. + * + * Returns: The resolved object or NULL on path lookup failure. 
+ */ +Object *object_resolve_path_at(Object *parent, const char *path); + /** * object_resolve_path_component: * @parent: the object in which to resolve the path diff --git a/include/sysemu/block-backend.h b/include/sysemu/block-backend.h index 29d4fdbf63..e5e1524f06 100644 --- a/include/sysemu/block-backend.h +++ b/include/sysemu/block-backend.h @@ -126,38 +126,42 @@ BlockBackend *blk_by_dev(void *dev); BlockBackend *blk_by_qdev_id(const char *id, Error **errp); void blk_set_dev_ops(BlockBackend *blk, const BlockDevOps *ops, void *opaque); int coroutine_fn blk_co_preadv(BlockBackend *blk, int64_t offset, - unsigned int bytes, QEMUIOVector *qiov, + int64_t bytes, QEMUIOVector *qiov, BdrvRequestFlags flags); int coroutine_fn blk_co_pwritev_part(BlockBackend *blk, int64_t offset, - unsigned int bytes, + int64_t bytes, QEMUIOVector *qiov, size_t qiov_offset, BdrvRequestFlags flags); int coroutine_fn blk_co_pwritev(BlockBackend *blk, int64_t offset, - unsigned int bytes, QEMUIOVector *qiov, + int64_t bytes, QEMUIOVector *qiov, BdrvRequestFlags flags); static inline int coroutine_fn blk_co_pread(BlockBackend *blk, int64_t offset, - unsigned int bytes, void *buf, + int64_t bytes, void *buf, BdrvRequestFlags flags) { QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes); + assert(bytes <= SIZE_MAX); + return blk_co_preadv(blk, offset, bytes, &qiov, flags); } static inline int coroutine_fn blk_co_pwrite(BlockBackend *blk, int64_t offset, - unsigned int bytes, void *buf, + int64_t bytes, void *buf, BdrvRequestFlags flags) { QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes); + assert(bytes <= SIZE_MAX); + return blk_co_pwritev(blk, offset, bytes, &qiov, flags); } int blk_pwrite_zeroes(BlockBackend *blk, int64_t offset, - int bytes, BdrvRequestFlags flags); + int64_t bytes, BdrvRequestFlags flags); BlockAIOCB *blk_aio_pwrite_zeroes(BlockBackend *blk, int64_t offset, - int bytes, BdrvRequestFlags flags, + int64_t bytes, BdrvRequestFlags flags, BlockCompletionFunc *cb, void *opaque); int blk_make_zero(BlockBackend *blk, BdrvRequestFlags flags); int blk_pread(BlockBackend *blk, int64_t offset, void *buf, int bytes); @@ -174,15 +178,16 @@ BlockAIOCB *blk_aio_pwritev(BlockBackend *blk, int64_t offset, BlockCompletionFunc *cb, void *opaque); BlockAIOCB *blk_aio_flush(BlockBackend *blk, BlockCompletionFunc *cb, void *opaque); -BlockAIOCB *blk_aio_pdiscard(BlockBackend *blk, int64_t offset, int bytes, +BlockAIOCB *blk_aio_pdiscard(BlockBackend *blk, int64_t offset, int64_t bytes, BlockCompletionFunc *cb, void *opaque); void blk_aio_cancel(BlockAIOCB *acb); void blk_aio_cancel_async(BlockAIOCB *acb); int blk_ioctl(BlockBackend *blk, unsigned long int req, void *buf); BlockAIOCB *blk_aio_ioctl(BlockBackend *blk, unsigned long int req, void *buf, BlockCompletionFunc *cb, void *opaque); -int blk_co_pdiscard(BlockBackend *blk, int64_t offset, int bytes); -int blk_co_flush(BlockBackend *blk); +int coroutine_fn blk_co_pdiscard(BlockBackend *blk, int64_t offset, + int64_t bytes); +int coroutine_fn blk_co_flush(BlockBackend *blk); int blk_flush(BlockBackend *blk); int blk_commit_all(void); void blk_inc_in_flight(BlockBackend *blk); @@ -211,6 +216,7 @@ uint32_t blk_get_request_alignment(BlockBackend *blk); uint32_t blk_get_max_transfer(BlockBackend *blk); uint64_t blk_get_max_hw_transfer(BlockBackend *blk); int blk_get_max_iov(BlockBackend *blk); +int blk_get_max_hw_iov(BlockBackend *blk); void blk_set_guest_block_size(BlockBackend *blk, int align); void *blk_try_blockalign(BlockBackend *blk, size_t size); 
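/*
 * Illustrative sketch (not part of the patch): with the request lengths in
 * block-backend.h widened to int64_t, a caller can pass a 64-bit byte count
 * straight through; the assert(bytes <= SIZE_MAX) added to the inline
 * wrappers only guards the QEMUIOVector built around the linear buffer.
 * The function name below is hypothetical.
 */
#include "qemu/osdep.h"
#include "sysemu/block-backend.h"

static int coroutine_fn example_read_at(BlockBackend *blk, int64_t offset,
                                        int64_t bytes, void *buf)
{
    /* Plain read, no BdrvRequestFlags; returns 0 or a negative errno. */
    int ret = blk_co_pread(blk, offset, bytes, buf, 0);
    if (ret < 0) {
        return ret;
    }
    return 0;
}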
void *blk_blockalign(BlockBackend *blk, size_t size); @@ -242,12 +248,12 @@ int blk_get_open_flags_from_root_state(BlockBackend *blk); void *blk_aio_get(const AIOCBInfo *aiocb_info, BlockBackend *blk, BlockCompletionFunc *cb, void *opaque); int coroutine_fn blk_co_pwrite_zeroes(BlockBackend *blk, int64_t offset, - int bytes, BdrvRequestFlags flags); + int64_t bytes, BdrvRequestFlags flags); int blk_pwrite_compressed(BlockBackend *blk, int64_t offset, const void *buf, - int bytes); + int64_t bytes); int blk_truncate(BlockBackend *blk, int64_t offset, bool exact, PreallocMode prealloc, BdrvRequestFlags flags, Error **errp); -int blk_pdiscard(BlockBackend *blk, int64_t offset, int bytes); +int blk_pdiscard(BlockBackend *blk, int64_t offset, int64_t bytes); int blk_save_vmstate(BlockBackend *blk, const uint8_t *buf, int64_t pos, int size); int blk_load_vmstate(BlockBackend *blk, uint8_t *buf, int64_t pos, int size); @@ -268,7 +274,7 @@ void blk_unregister_buf(BlockBackend *blk, void *host); int coroutine_fn blk_co_copy_range(BlockBackend *blk_in, int64_t off_in, BlockBackend *blk_out, int64_t off_out, - int bytes, BdrvRequestFlags read_flags, + int64_t bytes, BdrvRequestFlags read_flags, BdrvRequestFlags write_flags); const BdrvChild *blk_root(BlockBackend *blk); diff --git a/include/sysemu/device_tree.h b/include/sysemu/device_tree.h index 8a2fe55622..ef060a9759 100644 --- a/include/sysemu/device_tree.h +++ b/include/sysemu/device_tree.h @@ -121,6 +121,7 @@ uint32_t qemu_fdt_get_phandle(void *fdt, const char *path); uint32_t qemu_fdt_alloc_phandle(void *fdt); int qemu_fdt_nop_node(void *fdt, const char *node_path); int qemu_fdt_add_subnode(void *fdt, const char *name); +int qemu_fdt_add_path(void *fdt, const char *path); #define qemu_fdt_setprop_cells(fdt, node_path, property, ...) \ do { \ diff --git a/include/sysemu/kvm.h b/include/sysemu/kvm.h index a1ab1ee12d..7b22aeb6ae 100644 --- a/include/sysemu/kvm.h +++ b/include/sysemu/kvm.h @@ -547,4 +547,5 @@ bool kvm_cpu_check_are_resettable(void); bool kvm_arch_cpu_check_are_resettable(void); +bool kvm_dirty_ring_enabled(void); #endif diff --git a/include/sysemu/kvm_int.h b/include/sysemu/kvm_int.h index c788452cd9..1f5487d9b7 100644 --- a/include/sysemu/kvm_int.h +++ b/include/sysemu/kvm_int.h @@ -37,7 +37,7 @@ typedef struct KVMMemoryListener { } KVMMemoryListener; void kvm_memory_listener_register(KVMState *s, KVMMemoryListener *kml, - AddressSpace *as, int as_id); + AddressSpace *as, int as_id, const char *name); void kvm_set_max_memslot_size(hwaddr max_slot_size); diff --git a/include/sysemu/sev.h b/include/sysemu/sev.h deleted file mode 100644 index 94d821d737..0000000000 --- a/include/sysemu/sev.h +++ /dev/null @@ -1,28 +0,0 @@ -/* - * QEMU Secure Encrypted Virutualization (SEV) support - * - * Copyright: Advanced Micro Devices, 2016-2018 - * - * Authors: - * Brijesh Singh - * - * This work is licensed under the terms of the GNU GPL, version 2 or later. - * See the COPYING file in the top-level directory. 
- * - */ - -#ifndef QEMU_SEV_H -#define QEMU_SEV_H - -#include "sysemu/kvm.h" - -bool sev_enabled(void); -int sev_kvm_init(ConfidentialGuestSupport *cgs, Error **errp); -int sev_encrypt_flash(uint8_t *ptr, uint64_t len, Error **errp); -int sev_inject_launch_secret(const char *hdr, const char *secret, - uint64_t gpa, Error **errp); - -int sev_es_save_reset_vector(void *flash_ptr, uint64_t flash_size); -void sev_es_set_reset_vector(CPUState *cpu); - -#endif diff --git a/include/sysemu/watchdog.h b/include/sysemu/watchdog.h index a08d16380d..d2d4901dbb 100644 --- a/include/sysemu/watchdog.h +++ b/include/sysemu/watchdog.h @@ -37,7 +37,6 @@ typedef struct WatchdogTimerModel WatchdogTimerModel; /* in hw/watchdog.c */ int select_watchdog(const char *p); -int select_watchdog_action(const char *action); WatchdogAction get_watchdog_action(void); void watchdog_add_model(WatchdogTimerModel *model); void watchdog_perform_action(void); diff --git a/include/tcg/tcg-ldst.h b/include/tcg/tcg-ldst.h new file mode 100644 index 0000000000..bf40942de4 --- /dev/null +++ b/include/tcg/tcg-ldst.h @@ -0,0 +1,79 @@ +/* + * Memory helpers that will be used by TCG generated code. + * + * Copyright (c) 2008 Fabrice Bellard + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#ifndef TCG_LDST_H +#define TCG_LDST_H 1 + +#ifdef CONFIG_SOFTMMU + +/* Value zero-extended to tcg register size. */ +tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr, + MemOpIdx oi, uintptr_t retaddr); +tcg_target_ulong helper_le_lduw_mmu(CPUArchState *env, target_ulong addr, + MemOpIdx oi, uintptr_t retaddr); +tcg_target_ulong helper_le_ldul_mmu(CPUArchState *env, target_ulong addr, + MemOpIdx oi, uintptr_t retaddr); +uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr, + MemOpIdx oi, uintptr_t retaddr); +tcg_target_ulong helper_be_lduw_mmu(CPUArchState *env, target_ulong addr, + MemOpIdx oi, uintptr_t retaddr); +tcg_target_ulong helper_be_ldul_mmu(CPUArchState *env, target_ulong addr, + MemOpIdx oi, uintptr_t retaddr); +uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr, + MemOpIdx oi, uintptr_t retaddr); + +/* Value sign-extended to tcg register size. 
*/ +tcg_target_ulong helper_ret_ldsb_mmu(CPUArchState *env, target_ulong addr, + MemOpIdx oi, uintptr_t retaddr); +tcg_target_ulong helper_le_ldsw_mmu(CPUArchState *env, target_ulong addr, + MemOpIdx oi, uintptr_t retaddr); +tcg_target_ulong helper_le_ldsl_mmu(CPUArchState *env, target_ulong addr, + MemOpIdx oi, uintptr_t retaddr); +tcg_target_ulong helper_be_ldsw_mmu(CPUArchState *env, target_ulong addr, + MemOpIdx oi, uintptr_t retaddr); +tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr, + MemOpIdx oi, uintptr_t retaddr); + +void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val, + MemOpIdx oi, uintptr_t retaddr); +void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val, + MemOpIdx oi, uintptr_t retaddr); +void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val, + MemOpIdx oi, uintptr_t retaddr); +void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val, + MemOpIdx oi, uintptr_t retaddr); +void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val, + MemOpIdx oi, uintptr_t retaddr); +void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val, + MemOpIdx oi, uintptr_t retaddr); +void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val, + MemOpIdx oi, uintptr_t retaddr); + +#else + +void QEMU_NORETURN helper_unaligned_ld(CPUArchState *env, target_ulong addr); +void QEMU_NORETURN helper_unaligned_st(CPUArchState *env, target_ulong addr); + +#endif /* CONFIG_SOFTMMU */ +#endif /* TCG_LDST_H */ diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h index 44ccd86f3e..42f5b500ed 100644 --- a/include/tcg/tcg.h +++ b/include/tcg/tcg.h @@ -27,12 +27,12 @@ #include "cpu.h" #include "exec/memop.h" +#include "exec/memopidx.h" #include "qemu/bitops.h" #include "qemu/plugin.h" #include "qemu/queue.h" #include "tcg/tcg-mo.h" #include "tcg-target.h" -#include "qemu/int128.h" #include "tcg/tcg-cond.h" /* XXX: make safe guess about sizes */ @@ -492,9 +492,6 @@ typedef struct TCGOp { /* Next and previous opcodes. */ QTAILQ_ENTRY(TCGOp) link; -#ifdef CONFIG_PLUGIN - QSIMPLEQ_ENTRY(TCGOp) plugin_link; -#endif /* Arguments for the opcode. */ TCGArg args[MAX_OPC_PARAM]; @@ -604,9 +601,6 @@ struct TCGContext { /* descriptor of the instruction being translated */ struct qemu_plugin_insn *plugin_insn; - - /* list to quickly access the injected ops */ - QSIMPLEQ_HEAD(, TCGOp) plugin_ops; #endif GHashTable *const_table[TCG_TYPE_COUNT]; @@ -943,8 +937,8 @@ int tcg_check_temp_count(void); #endif int64_t tcg_cpu_exec_time(void); -void tcg_dump_info(void); -void tcg_dump_op_count(void); +void tcg_dump_info(GString *buf); +void tcg_dump_op_count(GString *buf); #define TCG_CT_CONST 1 /* any constant of register size */ @@ -1147,44 +1141,6 @@ static inline size_t tcg_current_code_size(TCGContext *s) return tcg_ptr_byte_diff(s->code_ptr, s->code_buf); } -/* Combine the MemOp and mmu_idx parameters into a single value. */ -typedef uint32_t TCGMemOpIdx; - -/** - * make_memop_idx - * @op: memory operation - * @idx: mmu index - * - * Encode these values into a single parameter. - */ -static inline TCGMemOpIdx make_memop_idx(MemOp op, unsigned idx) -{ - tcg_debug_assert(idx <= 15); - return (op << 4) | idx; -} - -/** - * get_memop - * @oi: combined op/idx parameter - * - * Extract the memory operation from the combined value. 
- */ -static inline MemOp get_memop(TCGMemOpIdx oi) -{ - return oi >> 4; -} - -/** - * get_mmuidx - * @oi: combined op/idx parameter - * - * Extract the mmu index from the combined value. - */ -static inline unsigned get_mmuidx(TCGMemOpIdx oi) -{ - return oi & 15; -} - /** * tcg_qemu_tb_exec: * @env: pointer to CPUArchState for the CPU @@ -1272,162 +1228,17 @@ uint64_t dup_const(unsigned vece, uint64_t c); : (qemu_build_not_reached_always(), 0)) \ : dup_const(VECE, C)) -/* - * Memory helpers that will be used by TCG generated code. - */ -#ifdef CONFIG_SOFTMMU -/* Value zero-extended to tcg register size. */ -tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr, - TCGMemOpIdx oi, uintptr_t retaddr); -tcg_target_ulong helper_le_lduw_mmu(CPUArchState *env, target_ulong addr, - TCGMemOpIdx oi, uintptr_t retaddr); -tcg_target_ulong helper_le_ldul_mmu(CPUArchState *env, target_ulong addr, - TCGMemOpIdx oi, uintptr_t retaddr); -uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr, - TCGMemOpIdx oi, uintptr_t retaddr); -tcg_target_ulong helper_be_lduw_mmu(CPUArchState *env, target_ulong addr, - TCGMemOpIdx oi, uintptr_t retaddr); -tcg_target_ulong helper_be_ldul_mmu(CPUArchState *env, target_ulong addr, - TCGMemOpIdx oi, uintptr_t retaddr); -uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr, - TCGMemOpIdx oi, uintptr_t retaddr); - -/* Value sign-extended to tcg register size. */ -tcg_target_ulong helper_ret_ldsb_mmu(CPUArchState *env, target_ulong addr, - TCGMemOpIdx oi, uintptr_t retaddr); -tcg_target_ulong helper_le_ldsw_mmu(CPUArchState *env, target_ulong addr, - TCGMemOpIdx oi, uintptr_t retaddr); -tcg_target_ulong helper_le_ldsl_mmu(CPUArchState *env, target_ulong addr, - TCGMemOpIdx oi, uintptr_t retaddr); -tcg_target_ulong helper_be_ldsw_mmu(CPUArchState *env, target_ulong addr, - TCGMemOpIdx oi, uintptr_t retaddr); -tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr, - TCGMemOpIdx oi, uintptr_t retaddr); - -void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val, - TCGMemOpIdx oi, uintptr_t retaddr); -void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val, - TCGMemOpIdx oi, uintptr_t retaddr); -void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val, - TCGMemOpIdx oi, uintptr_t retaddr); -void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val, - TCGMemOpIdx oi, uintptr_t retaddr); -void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val, - TCGMemOpIdx oi, uintptr_t retaddr); -void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val, - TCGMemOpIdx oi, uintptr_t retaddr); -void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val, - TCGMemOpIdx oi, uintptr_t retaddr); - -/* Temporary aliases until backends are converted. 
*/ -#ifdef TARGET_WORDS_BIGENDIAN -# define helper_ret_ldsw_mmu helper_be_ldsw_mmu -# define helper_ret_lduw_mmu helper_be_lduw_mmu -# define helper_ret_ldsl_mmu helper_be_ldsl_mmu -# define helper_ret_ldul_mmu helper_be_ldul_mmu -# define helper_ret_ldl_mmu helper_be_ldul_mmu -# define helper_ret_ldq_mmu helper_be_ldq_mmu -# define helper_ret_stw_mmu helper_be_stw_mmu -# define helper_ret_stl_mmu helper_be_stl_mmu -# define helper_ret_stq_mmu helper_be_stq_mmu +#if TARGET_LONG_BITS == 64 +# define dup_const_tl dup_const #else -# define helper_ret_ldsw_mmu helper_le_ldsw_mmu -# define helper_ret_lduw_mmu helper_le_lduw_mmu -# define helper_ret_ldsl_mmu helper_le_ldsl_mmu -# define helper_ret_ldul_mmu helper_le_ldul_mmu -# define helper_ret_ldl_mmu helper_le_ldul_mmu -# define helper_ret_ldq_mmu helper_le_ldq_mmu -# define helper_ret_stw_mmu helper_le_stw_mmu -# define helper_ret_stl_mmu helper_le_stl_mmu -# define helper_ret_stq_mmu helper_le_stq_mmu +# define dup_const_tl(VECE, C) \ + (__builtin_constant_p(VECE) \ + ? ( (VECE) == MO_8 ? 0x01010101ul * (uint8_t)(C) \ + : (VECE) == MO_16 ? 0x00010001ul * (uint16_t)(C) \ + : (VECE) == MO_32 ? 0x00000001ul * (uint32_t)(C) \ + : (qemu_build_not_reached_always(), 0)) \ + : (target_long)dup_const(VECE, C)) #endif -#endif /* CONFIG_SOFTMMU */ - -uint32_t cpu_atomic_cmpxchgb_mmu(CPUArchState *env, target_ulong addr, - uint32_t cmpv, uint32_t newv, - TCGMemOpIdx oi, uintptr_t retaddr); -uint32_t cpu_atomic_cmpxchgw_le_mmu(CPUArchState *env, target_ulong addr, - uint32_t cmpv, uint32_t newv, - TCGMemOpIdx oi, uintptr_t retaddr); -uint32_t cpu_atomic_cmpxchgl_le_mmu(CPUArchState *env, target_ulong addr, - uint32_t cmpv, uint32_t newv, - TCGMemOpIdx oi, uintptr_t retaddr); -uint64_t cpu_atomic_cmpxchgq_le_mmu(CPUArchState *env, target_ulong addr, - uint64_t cmpv, uint64_t newv, - TCGMemOpIdx oi, uintptr_t retaddr); -uint32_t cpu_atomic_cmpxchgw_be_mmu(CPUArchState *env, target_ulong addr, - uint32_t cmpv, uint32_t newv, - TCGMemOpIdx oi, uintptr_t retaddr); -uint32_t cpu_atomic_cmpxchgl_be_mmu(CPUArchState *env, target_ulong addr, - uint32_t cmpv, uint32_t newv, - TCGMemOpIdx oi, uintptr_t retaddr); -uint64_t cpu_atomic_cmpxchgq_be_mmu(CPUArchState *env, target_ulong addr, - uint64_t cmpv, uint64_t newv, - TCGMemOpIdx oi, uintptr_t retaddr); - -#define GEN_ATOMIC_HELPER(NAME, TYPE, SUFFIX) \ -TYPE cpu_atomic_ ## NAME ## SUFFIX ## _mmu \ - (CPUArchState *env, target_ulong addr, TYPE val, \ - TCGMemOpIdx oi, uintptr_t retaddr); - -#ifdef CONFIG_ATOMIC64 -#define GEN_ATOMIC_HELPER_ALL(NAME) \ - GEN_ATOMIC_HELPER(NAME, uint32_t, b) \ - GEN_ATOMIC_HELPER(NAME, uint32_t, w_le) \ - GEN_ATOMIC_HELPER(NAME, uint32_t, w_be) \ - GEN_ATOMIC_HELPER(NAME, uint32_t, l_le) \ - GEN_ATOMIC_HELPER(NAME, uint32_t, l_be) \ - GEN_ATOMIC_HELPER(NAME, uint64_t, q_le) \ - GEN_ATOMIC_HELPER(NAME, uint64_t, q_be) -#else -#define GEN_ATOMIC_HELPER_ALL(NAME) \ - GEN_ATOMIC_HELPER(NAME, uint32_t, b) \ - GEN_ATOMIC_HELPER(NAME, uint32_t, w_le) \ - GEN_ATOMIC_HELPER(NAME, uint32_t, w_be) \ - GEN_ATOMIC_HELPER(NAME, uint32_t, l_le) \ - GEN_ATOMIC_HELPER(NAME, uint32_t, l_be) -#endif - -GEN_ATOMIC_HELPER_ALL(fetch_add) -GEN_ATOMIC_HELPER_ALL(fetch_sub) -GEN_ATOMIC_HELPER_ALL(fetch_and) -GEN_ATOMIC_HELPER_ALL(fetch_or) -GEN_ATOMIC_HELPER_ALL(fetch_xor) -GEN_ATOMIC_HELPER_ALL(fetch_smin) -GEN_ATOMIC_HELPER_ALL(fetch_umin) -GEN_ATOMIC_HELPER_ALL(fetch_smax) -GEN_ATOMIC_HELPER_ALL(fetch_umax) - -GEN_ATOMIC_HELPER_ALL(add_fetch) -GEN_ATOMIC_HELPER_ALL(sub_fetch) 
-GEN_ATOMIC_HELPER_ALL(and_fetch) -GEN_ATOMIC_HELPER_ALL(or_fetch) -GEN_ATOMIC_HELPER_ALL(xor_fetch) -GEN_ATOMIC_HELPER_ALL(smin_fetch) -GEN_ATOMIC_HELPER_ALL(umin_fetch) -GEN_ATOMIC_HELPER_ALL(smax_fetch) -GEN_ATOMIC_HELPER_ALL(umax_fetch) - -GEN_ATOMIC_HELPER_ALL(xchg) - -#undef GEN_ATOMIC_HELPER_ALL -#undef GEN_ATOMIC_HELPER - -Int128 cpu_atomic_cmpxchgo_le_mmu(CPUArchState *env, target_ulong addr, - Int128 cmpv, Int128 newv, - TCGMemOpIdx oi, uintptr_t retaddr); -Int128 cpu_atomic_cmpxchgo_be_mmu(CPUArchState *env, target_ulong addr, - Int128 cmpv, Int128 newv, - TCGMemOpIdx oi, uintptr_t retaddr); - -Int128 cpu_atomic_ldo_le_mmu(CPUArchState *env, target_ulong addr, - TCGMemOpIdx oi, uintptr_t retaddr); -Int128 cpu_atomic_ldo_be_mmu(CPUArchState *env, target_ulong addr, - TCGMemOpIdx oi, uintptr_t retaddr); -void cpu_atomic_sto_le_mmu(CPUArchState *env, target_ulong addr, Int128 val, - TCGMemOpIdx oi, uintptr_t retaddr); -void cpu_atomic_sto_be_mmu(CPUArchState *env, target_ulong addr, Int128 val, - TCGMemOpIdx oi, uintptr_t retaddr); #ifdef CONFIG_DEBUG_TCG void tcg_assert_listed_vecop(TCGOpcode); diff --git a/include/ui/console.h b/include/ui/console.h index 244664d727..6d678924f6 100644 --- a/include/ui/console.h +++ b/include/ui/console.h @@ -167,10 +167,15 @@ typedef struct QemuDmaBuf { uint32_t fourcc; uint64_t modifier; uint32_t texture; + uint32_t x; + uint32_t y; + uint32_t scanout_width; + uint32_t scanout_height; bool y0_top; void *sync; int fence_fd; bool allow_fences; + bool draw_submitted; } QemuDmaBuf; typedef struct DisplayState DisplayState; diff --git a/io/dns-resolver.c b/io/dns-resolver.c index a5946a93bf..53b0e8407a 100644 --- a/io/dns-resolver.c +++ b/io/dns-resolver.c @@ -122,7 +122,7 @@ static int qio_dns_resolver_lookup_sync_inet(QIODNSResolver *resolver, .ipv4 = iaddr->ipv4, .has_ipv6 = iaddr->has_ipv6, .ipv6 = iaddr->ipv6, -#ifdef IPPROTO_MPTCP +#ifdef HAVE_IPPROTO_MPTCP .has_mptcp = iaddr->has_mptcp, .mptcp = iaddr->mptcp, #endif diff --git a/iothread.c b/iothread.c index ddbbde61f7..0f98af0f2a 100644 --- a/iothread.c +++ b/iothread.c @@ -215,36 +215,34 @@ static void iothread_complete(UserCreatable *obj, Error **errp) typedef struct { const char *name; ptrdiff_t offset; /* field's byte offset in IOThread struct */ -} PollParamInfo; +} IOThreadParamInfo; -static PollParamInfo poll_max_ns_info = { +static IOThreadParamInfo poll_max_ns_info = { "poll-max-ns", offsetof(IOThread, poll_max_ns), }; -static PollParamInfo poll_grow_info = { +static IOThreadParamInfo poll_grow_info = { "poll-grow", offsetof(IOThread, poll_grow), }; -static PollParamInfo poll_shrink_info = { +static IOThreadParamInfo poll_shrink_info = { "poll-shrink", offsetof(IOThread, poll_shrink), }; -static PollParamInfo aio_max_batch_info = { +static IOThreadParamInfo aio_max_batch_info = { "aio-max-batch", offsetof(IOThread, aio_max_batch), }; static void iothread_get_param(Object *obj, Visitor *v, - const char *name, void *opaque, Error **errp) + const char *name, IOThreadParamInfo *info, Error **errp) { IOThread *iothread = IOTHREAD(obj); - PollParamInfo *info = opaque; int64_t *field = (void *)iothread + info->offset; visit_type_int64(v, name, field, errp); } static bool iothread_set_param(Object *obj, Visitor *v, - const char *name, void *opaque, Error **errp) + const char *name, IOThreadParamInfo *info, Error **errp) { IOThread *iothread = IOTHREAD(obj); - PollParamInfo *info = opaque; int64_t *field = (void *)iothread + info->offset; int64_t value; @@ -266,16 +264,18 @@ static bool 
iothread_set_param(Object *obj, Visitor *v, static void iothread_get_poll_param(Object *obj, Visitor *v, const char *name, void *opaque, Error **errp) { + IOThreadParamInfo *info = opaque; - iothread_get_param(obj, v, name, opaque, errp); + iothread_get_param(obj, v, name, info, errp); } static void iothread_set_poll_param(Object *obj, Visitor *v, const char *name, void *opaque, Error **errp) { IOThread *iothread = IOTHREAD(obj); + IOThreadParamInfo *info = opaque; - if (!iothread_set_param(obj, v, name, opaque, errp)) { + if (!iothread_set_param(obj, v, name, info, errp)) { return; } @@ -291,16 +291,18 @@ static void iothread_set_poll_param(Object *obj, Visitor *v, static void iothread_get_aio_param(Object *obj, Visitor *v, const char *name, void *opaque, Error **errp) { + IOThreadParamInfo *info = opaque; - iothread_get_param(obj, v, name, opaque, errp); + iothread_get_param(obj, v, name, info, errp); } static void iothread_set_aio_param(Object *obj, Visitor *v, const char *name, void *opaque, Error **errp) { IOThread *iothread = IOTHREAD(obj); + IOThreadParamInfo *info = opaque; - if (!iothread_set_param(obj, v, name, opaque, errp)) { + if (!iothread_set_param(obj, v, name, info, errp)) { return; } diff --git a/job.c b/job.c index e7a5d28854..dbfa67bb0a 100644 --- a/job.c +++ b/job.c @@ -216,6 +216,13 @@ const char *job_type_str(const Job *job) } bool job_is_cancelled(Job *job) +{ + /* force_cancel may be true only if cancelled is true, too */ + assert(job->cancelled || !job->force_cancel); + return job->force_cancel; +} + +bool job_cancel_requested(Job *job) { return job->cancelled; } @@ -719,8 +726,12 @@ static int job_finalize_single(Job *job) static void job_cancel_async(Job *job, bool force) { if (job->driver->cancel) { - job->driver->cancel(job, force); + force = job->driver->cancel(job, force); + } else { + /* No .cancel() means the job will behave as if force-cancelled */ + force = true; } + if (job->user_paused) { /* Do not call job_enter here, the caller will handle it. */ if (job->driver->user_resume) { @@ -730,14 +741,23 @@ static void job_cancel_async(Job *job, bool force) assert(job->pause_count > 0); job->pause_count--; } - job->cancelled = true; - /* To prevent 'force == false' overriding a previous 'force == true' */ - job->force_cancel |= force; + + /* + * Ignore soft cancel requests after the job is already done + * (We will still invoke job->driver->cancel() above, but if the + * job driver supports soft cancelling and the job is done, that + * should be a no-op, too. We still call it so it can override + * @force.) + */ + if (force || !job->deferred_to_main_loop) { + job->cancelled = true; + /* To prevent 'force == false' overriding a previous 'force == true' */ + job->force_cancel |= force; + } } static void job_completed_txn_abort(Job *job) { - AioContext *outer_ctx = job->aio_context; AioContext *ctx; JobTxn *txn = job->txn; Job *other_job; @@ -751,10 +771,14 @@ static void job_completed_txn_abort(Job *job) txn->aborting = true; job_txn_ref(txn); - /* We can only hold the single job's AioContext lock while calling + /* + * We can only hold the single job's AioContext lock while calling * job_finalize_single() because the finalization callbacks can involve - * calls of AIO_WAIT_WHILE(), which could deadlock otherwise. */ - aio_context_release(outer_ctx); + * calls of AIO_WAIT_WHILE(), which could deadlock otherwise. + * Note that the job's AioContext may change when it is finalized. 
+ */ + job_ref(job); + aio_context_release(job->aio_context); /* Other jobs are effectively cancelled by us, set the status for * them; this job, however, may or may not be cancelled, depending @@ -763,23 +787,37 @@ static void job_completed_txn_abort(Job *job) if (other_job != job) { ctx = other_job->aio_context; aio_context_acquire(ctx); - job_cancel_async(other_job, false); + /* + * This is a transaction: If one job failed, no result will matter. + * Therefore, pass force=true to terminate all other jobs as quickly + * as possible. + */ + job_cancel_async(other_job, true); aio_context_release(ctx); } } while (!QLIST_EMPTY(&txn->jobs)) { other_job = QLIST_FIRST(&txn->jobs); + /* + * The job's AioContext may change, so store it in @ctx so we + * release the same context that we have acquired before. + */ ctx = other_job->aio_context; aio_context_acquire(ctx); if (!job_is_completed(other_job)) { - assert(job_is_cancelled(other_job)); + assert(job_cancel_requested(other_job)); job_finish_sync(other_job, NULL, NULL); } job_finalize_single(other_job); aio_context_release(ctx); } - aio_context_acquire(outer_ctx); + /* + * Use job_ref()/job_unref() so we can read the AioContext here + * even if the job went away during job_finalize_single(). + */ + aio_context_acquire(job->aio_context); + job_unref(job); job_txn_unref(txn); } @@ -942,7 +980,19 @@ void job_cancel(Job *job, bool force) if (!job_started(job)) { job_completed(job); } else if (job->deferred_to_main_loop) { - job_completed_txn_abort(job); + /* + * job_cancel_async() ignores soft-cancel requests for jobs + * that are already done (i.e. deferred to the main loop). We + * have to check again whether the job is really cancelled. + * (job_cancel_requested() and job_is_cancelled() are equivalent + * here, because job_cancel_async() will make soft-cancel + * requests no-ops when deferred_to_main_loop is true. We + * choose to call job_is_cancelled() to show that we invoke + * job_completed_txn_abort() only for force-cancelled jobs.) + */ + if (job_is_cancelled(job)) { + job_completed_txn_abort(job); + } } else { job_enter(job); } @@ -964,9 +1014,21 @@ static void job_cancel_err(Job *job, Error **errp) job_cancel(job, false); } -int job_cancel_sync(Job *job) +/** + * Same as job_cancel_err(), but force-cancel. 
+ */ +static void job_force_cancel_err(Job *job, Error **errp) { - return job_finish_sync(job, &job_cancel_err, NULL); + job_cancel(job, true); +} + +int job_cancel_sync(Job *job, bool force) +{ + if (force) { + return job_finish_sync(job, &job_force_cancel_err, NULL); + } else { + return job_finish_sync(job, &job_cancel_err, NULL); + } } void job_cancel_sync_all(void) @@ -977,7 +1039,7 @@ void job_cancel_sync_all(void) while ((job = job_next(NULL))) { aio_context = job->aio_context; aio_context_acquire(aio_context); - job_cancel_sync(job); + job_cancel_sync(job, true); aio_context_release(aio_context); } } @@ -994,7 +1056,7 @@ void job_complete(Job *job, Error **errp) if (job_apply_verb(job, JOB_VERB_COMPLETE, errp)) { return; } - if (job_is_cancelled(job) || !job->driver->complete) { + if (job_cancel_requested(job) || !job->driver->complete) { error_setg(errp, "The active block job '%s' cannot be completed", job->id); return; diff --git a/libdecnumber/decContext.c b/libdecnumber/decContext.c index 7d97a65ac5..1956edf0a7 100644 --- a/libdecnumber/decContext.c +++ b/libdecnumber/decContext.c @@ -53,12 +53,13 @@ static const Flag *mfctop=(Flag *)&mfcone; /* -> top byte */ const uByte DECSTICKYTAB[10]={1,1,2,3,4,6,6,7,8,9}; /* used if sticky */ /* ------------------------------------------------------------------ */ -/* Powers of ten (powers[n]==10**n, 0<=n<=9) */ +/* Powers of ten (powers[n]==10**n, 0<=n<=19) */ /* ------------------------------------------------------------------ */ -const uLong DECPOWERS[19] = {1, 10, 100, 1000, 10000, 100000, 1000000, +const uLong DECPOWERS[20] = {1, 10, 100, 1000, 10000, 100000, 1000000, 10000000, 100000000, 1000000000, 10000000000ULL, 100000000000ULL, 1000000000000ULL, 10000000000000ULL, 100000000000000ULL, 1000000000000000ULL, - 10000000000000000ULL, 100000000000000000ULL, 1000000000000000000ULL, }; + 10000000000000000ULL, 100000000000000000ULL, 1000000000000000000ULL, + 10000000000000000000ULL,}; /* ------------------------------------------------------------------ */ /* decContextClearStatus -- clear bits in current status */ diff --git a/libdecnumber/decNumber.c b/libdecnumber/decNumber.c index 1ffe458ad8..31282adafd 100644 --- a/libdecnumber/decNumber.c +++ b/libdecnumber/decNumber.c @@ -167,6 +167,7 @@ /* ------------------------------------------------------------------ */ #include "qemu/osdep.h" +#include "qemu/host-utils.h" #include "libdecnumber/dconfig.h" #include "libdecnumber/decNumber.h" #include "libdecnumber/decNumberLocal.h" @@ -263,6 +264,7 @@ static decNumber * decTrim(decNumber *, decContext *, Flag, Int *); static Int decUnitAddSub(const Unit *, Int, const Unit *, Int, Int, Unit *, Int); static Int decUnitCompare(const Unit *, Int, const Unit *, Int, Int); +static bool mulUInt128ByPowOf10(uLong *, uLong *, uInt); #if !DECSUBSET /* decFinish == decFinalize when no subset arithmetic needed */ @@ -462,6 +464,41 @@ decNumber *decNumberFromUInt64(decNumber *dn, uint64_t uin) return dn; } /* decNumberFromUInt64 */ +decNumber *decNumberFromInt128(decNumber *dn, uint64_t lo, int64_t hi) +{ + uint64_t unsig_hi = hi; + if (hi < 0) { + if (lo == 0) { + unsig_hi = -unsig_hi; + } else { + unsig_hi = ~unsig_hi; + lo = -lo; + } + } + + decNumberFromUInt128(dn, lo, unsig_hi); + if (hi < 0) { + dn->bits = DECNEG; /* sign needed */ + } + return dn; +} /* decNumberFromInt128 */ + +decNumber *decNumberFromUInt128(decNumber *dn, uint64_t lo, uint64_t hi) +{ + uint64_t rem; + Unit *up; /* work pointer */ + decNumberZero(dn); /* clean */ + if (lo == 0 && 
hi == 0) { + return dn; /* [or decGetDigits bad call] */ + } + for (up = dn->lsu; hi > 0 || lo > 0; up++) { + rem = divu128(&lo, &hi, DECDPUNMAX + 1); + *up = (Unit)rem; + } + dn->digits = decGetDigits(dn->lsu, up - dn->lsu); + return dn; +} /* decNumberFromUInt128 */ + /* ------------------------------------------------------------------ */ /* to-int64 -- conversion to int64 */ /* */ @@ -506,6 +543,68 @@ Invalid: return 0; } /* decNumberIntegralToInt64 */ +/* ------------------------------------------------------------------ */ +/* decNumberIntegralToInt128 -- conversion to int128 */ +/* */ +/* dn is the decNumber to convert. dn is assumed to have been */ +/* rounded to a floating point integer value. */ +/* set is the context for reporting errors */ +/* returns the converted decNumber via plow and phigh */ +/* */ +/* Invalid is set if the decNumber is a NaN, Infinite or is out of */ +/* range for a signed 128 bit integer. */ +/* ------------------------------------------------------------------ */ + +void decNumberIntegralToInt128(const decNumber *dn, decContext *set, + uint64_t *plow, uint64_t *phigh) +{ + int d; /* work */ + const Unit *up; /* .. */ + uint64_t lo = 0, hi = 0; + + if (decNumberIsSpecial(dn) || (dn->exponent < 0) || + (dn->digits + dn->exponent > 39)) { + goto Invalid; + } + + up = dn->lsu; /* -> lsu */ + + for (d = (dn->digits - 1) / DECDPUN; d >= 0; d--) { + if (mulu128(&lo, &hi, DECDPUNMAX + 1)) { + /* overflow */ + goto Invalid; + } + if (uadd64_overflow(lo, up[d], &lo)) { + if (uadd64_overflow(hi, 1, &hi)) { + /* overflow */ + goto Invalid; + } + } + } + + if (mulUInt128ByPowOf10(&lo, &hi, dn->exponent)) { + /* overflow */ + goto Invalid; + } + + if (decNumberIsNegative(dn)) { + if (lo == 0) { + *phigh = -hi; + *plow = 0; + } else { + *phigh = ~hi; + *plow = -lo; + } + } else { + *plow = lo; + *phigh = hi; + } + + return; + +Invalid: + decContextSetStatus(set, DEC_Invalid_operation); +} /* decNumberIntegralToInt128 */ /* ------------------------------------------------------------------ */ /* to-scientific-string -- conversion to numeric string */ @@ -7849,6 +7948,38 @@ static Int decGetDigits(Unit *uar, Int len) { return digits; } /* decGetDigits */ +/* ------------------------------------------------------------------ */ +/* mulUInt128ByPowOf10 -- multiply a 128-bit unsigned integer by a */ +/* power of 10. */ +/* */ +/* The 128-bit factor composed of plow and phigh is multiplied */ +/* by 10^exp. */ +/* */ +/* plow pointer to the low 64 bits of the first factor */ +/* phigh pointer to the high 64 bits of the first factor */ +/* exp the exponent of the power of 10 of the second factor */ +/* */ +/* If the result fits in 128 bits, returns false and the */ +/* multiplication result through plow and phigh. */ +/* Otherwise, returns true. 
*/ +/* ------------------------------------------------------------------ */ +static bool mulUInt128ByPowOf10(uLong *plow, uLong *phigh, uInt pow10) +{ + while (pow10 >= ARRAY_SIZE(powers)) { + if (mulu128(plow, phigh, powers[ARRAY_SIZE(powers) - 1])) { + /* Overflow */ + return true; + } + pow10 -= ARRAY_SIZE(powers) - 1; + } + + if (pow10 > 0) { + return mulu128(plow, phigh, powers[pow10]); + } else { + return false; + } +} + #if DECTRACE | DECCHECK /* ------------------------------------------------------------------ */ /* decNumberShow -- display a number [debug aid] */ diff --git a/linux-user/aarch64/cpu_loop.c b/linux-user/aarch64/cpu_loop.c index 54a18ab81d..04add21180 100644 --- a/linux-user/aarch64/cpu_loop.c +++ b/linux-user/aarch64/cpu_loop.c @@ -79,7 +79,7 @@ void cpu_loop(CPUARMState *env) { CPUState *cs = env_cpu(env); - int trapnr, ec, fsc, si_code; + int trapnr, ec, fsc, si_code, si_signo; abi_long ret; for (;;) { @@ -131,20 +131,26 @@ void cpu_loop(CPUARMState *env) fsc = extract32(env->exception.syndrome, 0, 6); switch (fsc) { case 0x04 ... 0x07: /* Translation fault, level {0-3} */ + si_signo = TARGET_SIGSEGV; si_code = TARGET_SEGV_MAPERR; break; case 0x09 ... 0x0b: /* Access flag fault, level {1-3} */ case 0x0d ... 0x0f: /* Permission fault, level {1-3} */ + si_signo = TARGET_SIGSEGV; si_code = TARGET_SEGV_ACCERR; break; case 0x11: /* Synchronous Tag Check Fault */ + si_signo = TARGET_SIGSEGV; si_code = TARGET_SEGV_MTESERR; break; + case 0x21: /* Alignment fault */ + si_signo = TARGET_SIGBUS; + si_code = TARGET_BUS_ADRALN; + break; default: g_assert_not_reached(); } - - force_sig_fault(TARGET_SIGSEGV, si_code, env->exception.vaddress); + force_sig_fault(si_signo, si_code, env->exception.vaddress); break; case EXCP_DEBUG: case EXCP_BKPT: diff --git a/linux-user/aarch64/signal.c b/linux-user/aarch64/signal.c index 49025648cb..29c52db3f1 100644 --- a/linux-user/aarch64/signal.c +++ b/linux-user/aarch64/signal.c @@ -109,7 +109,6 @@ struct target_rt_sigframe { struct target_rt_frame_record { uint64_t fp; uint64_t lr; - uint32_t tramp[2]; }; static void target_setup_general_frame(struct target_rt_sigframe *sf, @@ -461,9 +460,9 @@ static void target_setup_frame(int usig, struct target_sigaction *ka, layout.total_size = MAX(layout.total_size, sizeof(struct target_rt_sigframe)); - /* Reserve space for the return code. On a real system this would - * be within the VDSO. So, despite the name this is not a "real" - * record within the frame. + /* + * Reserve space for the standard frame unwind pair: fp, lr. + * Despite the name this is not a "real" record within the frame. */ fr_ofs = layout.total_size; layout.total_size += sizeof(struct target_rt_frame_record); @@ -496,15 +495,7 @@ static void target_setup_frame(int usig, struct target_sigaction *ka, if (ka->sa_flags & TARGET_SA_RESTORER) { return_addr = ka->sa_restorer; } else { - /* - * mov x8,#__NR_rt_sigreturn; svc #0 - * Since these are instructions they need to be put as little-endian - * regardless of target default or current CPU endianness. 
- */ - __put_user_e(0xd2801168, &fr->tramp[0], le); - __put_user_e(0xd4000001, &fr->tramp[1], le); - return_addr = frame_addr + fr_ofs - + offsetof(struct target_rt_frame_record, tramp); + return_addr = default_rt_sigreturn; } env->xregs[0] = usig; env->xregs[29] = frame_addr + fr_ofs; @@ -577,3 +568,20 @@ long do_sigreturn(CPUARMState *env) { return do_rt_sigreturn(env); } + +void setup_sigtramp(abi_ulong sigtramp_page) +{ + uint32_t *tramp = lock_user(VERIFY_WRITE, sigtramp_page, 8, 0); + assert(tramp != NULL); + + /* + * mov x8,#__NR_rt_sigreturn; svc #0 + * Since these are instructions they need to be put as little-endian + * regardless of target default or current CPU endianness. + */ + __put_user_e(0xd2801168, &tramp[0], le); + __put_user_e(0xd4000001, &tramp[1], le); + + default_rt_sigreturn = sigtramp_page; + unlock_user(tramp, sigtramp_page, 8); +} diff --git a/linux-user/aarch64/target_signal.h b/linux-user/aarch64/target_signal.h index 18013e1b23..7580d99403 100644 --- a/linux-user/aarch64/target_signal.h +++ b/linux-user/aarch64/target_signal.h @@ -25,4 +25,6 @@ typedef struct target_sigaltstack { #define TARGET_SEGV_MTESERR 9 /* Synchronous ARM MTE exception */ #define TARGET_ARCH_HAS_SETUP_FRAME +#define TARGET_ARCH_HAS_SIGTRAMP_PAGE 1 + #endif /* AARCH64_TARGET_SIGNAL_H */ diff --git a/linux-user/alpha/cpu_loop.c b/linux-user/alpha/cpu_loop.c index 1b00a81385..4029849d5c 100644 --- a/linux-user/alpha/cpu_loop.c +++ b/linux-user/alpha/cpu_loop.c @@ -54,21 +54,6 @@ void cpu_loop(CPUAlphaState *env) fprintf(stderr, "External interrupt. Exit\n"); exit(EXIT_FAILURE); break; - case EXCP_MMFAULT: - info.si_signo = TARGET_SIGSEGV; - info.si_errno = 0; - info.si_code = (page_get_flags(env->trap_arg0) & PAGE_VALID - ? TARGET_SEGV_ACCERR : TARGET_SEGV_MAPERR); - info._sifields._sigfault._addr = env->trap_arg0; - queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info); - break; - case EXCP_UNALIGN: - info.si_signo = TARGET_SIGBUS; - info.si_errno = 0; - info.si_code = TARGET_BUS_ADRALN; - info._sifields._sigfault._addr = env->trap_arg0; - queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info); - break; case EXCP_OPCDEC: do_sigill: info.si_signo = TARGET_SIGILL; diff --git a/linux-user/alpha/signal.c b/linux-user/alpha/signal.c index 3a820f616b..bbe3dd175a 100644 --- a/linux-user/alpha/signal.c +++ b/linux-user/alpha/signal.c @@ -55,13 +55,11 @@ struct target_ucontext { struct target_sigframe { struct target_sigcontext sc; - unsigned int retcode[3]; }; struct target_rt_sigframe { target_siginfo_t info; struct target_ucontext uc; - unsigned int retcode[3]; }; #define INSN_MOV_R30_R16 0x47fe0410 @@ -142,12 +140,7 @@ void setup_frame(int sig, struct target_sigaction *ka, if (ka->ka_restorer) { r26 = ka->ka_restorer; } else { - __put_user(INSN_MOV_R30_R16, &frame->retcode[0]); - __put_user(INSN_LDI_R0 + TARGET_NR_sigreturn, - &frame->retcode[1]); - __put_user(INSN_CALLSYS, &frame->retcode[2]); - /* imb() */ - r26 = frame_addr + offsetof(struct target_sigframe, retcode); + r26 = default_sigreturn; } unlock_user_struct(frame, frame_addr, 1); @@ -196,12 +189,7 @@ void setup_rt_frame(int sig, struct target_sigaction *ka, if (ka->ka_restorer) { r26 = ka->ka_restorer; } else { - __put_user(INSN_MOV_R30_R16, &frame->retcode[0]); - __put_user(INSN_LDI_R0 + TARGET_NR_rt_sigreturn, - &frame->retcode[1]); - __put_user(INSN_CALLSYS, &frame->retcode[2]); - /* imb(); */ - r26 = frame_addr + offsetof(struct target_rt_sigframe, retcode); + r26 = default_rt_sigreturn; } if (err) { @@ -269,3 +257,21 @@ 
badframe: force_sig(TARGET_SIGSEGV); return -TARGET_QEMU_ESIGRETURN; } + +void setup_sigtramp(abi_ulong sigtramp_page) +{ + uint32_t *tramp = lock_user(VERIFY_WRITE, sigtramp_page, 6 * 4, 0); + assert(tramp != NULL); + + default_sigreturn = sigtramp_page; + __put_user(INSN_MOV_R30_R16, &tramp[0]); + __put_user(INSN_LDI_R0 + TARGET_NR_sigreturn, &tramp[1]); + __put_user(INSN_CALLSYS, &tramp[2]); + + default_rt_sigreturn = sigtramp_page + 3 * 4; + __put_user(INSN_MOV_R30_R16, &tramp[3]); + __put_user(INSN_LDI_R0 + TARGET_NR_rt_sigreturn, &tramp[4]); + __put_user(INSN_CALLSYS, &tramp[5]); + + unlock_user(tramp, sigtramp_page, 6 * 4); +} diff --git a/linux-user/alpha/target_signal.h b/linux-user/alpha/target_signal.h index 250642913e..0b6a39de65 100644 --- a/linux-user/alpha/target_signal.h +++ b/linux-user/alpha/target_signal.h @@ -93,6 +93,7 @@ typedef struct target_sigaltstack { #define TARGET_ARCH_HAS_SETUP_FRAME #define TARGET_ARCH_HAS_KA_RESTORER +#define TARGET_ARCH_HAS_SIGTRAMP_PAGE 1 /* bit-flags */ #define TARGET_SS_AUTODISARM (1U << 31) /* disable sas during sighandling */ diff --git a/linux-user/arm/cpu_loop.c b/linux-user/arm/cpu_loop.c index 3f94354e9b..963137e7d2 100644 --- a/linux-user/arm/cpu_loop.c +++ b/linux-user/arm/cpu_loop.c @@ -25,6 +25,7 @@ #include "cpu_loop-common.h" #include "signal-common.h" #include "semihosting/common-semi.h" +#include "target/arm/syndrome.h" #define get_user_code_u32(x, gaddr, env) \ ({ abi_long __r = get_user_u32((x), (gaddr)); \ @@ -280,7 +281,7 @@ static bool emulate_arm_fpa11(CPUARMState *env, uint32_t opcode) void cpu_loop(CPUARMState *env) { CPUState *cs = env_cpu(env); - int trapnr; + int trapnr, si_signo, si_code; unsigned int n, insn; abi_ulong ret; @@ -433,9 +434,30 @@ void cpu_loop(CPUARMState *env) break; case EXCP_PREFETCH_ABORT: case EXCP_DATA_ABORT: - /* XXX: check env->error_code */ - force_sig_fault(TARGET_SIGSEGV, TARGET_SEGV_MAPERR, - env->exception.vaddress); + /* For user-only we don't set TTBCR_EAE, so look at the FSR. 
*/ + switch (env->exception.fsr & 0x1f) { + case 0x1: /* Alignment */ + si_signo = TARGET_SIGBUS; + si_code = TARGET_BUS_ADRALN; + break; + case 0x3: /* Access flag fault, level 1 */ + case 0x6: /* Access flag fault, level 2 */ + case 0x9: /* Domain fault, level 1 */ + case 0xb: /* Domain fault, level 2 */ + case 0xd: /* Permision fault, level 1 */ + case 0xf: /* Permision fault, level 2 */ + si_signo = TARGET_SIGSEGV; + si_code = TARGET_SEGV_ACCERR; + break; + case 0x5: /* Translation fault, level 1 */ + case 0x7: /* Translation fault, level 2 */ + si_signo = TARGET_SIGSEGV; + si_code = TARGET_SEGV_MAPERR; + break; + default: + g_assert_not_reached(); + } + force_sig_fault(si_signo, si_code, env->exception.vaddress); break; case EXCP_DEBUG: case EXCP_BKPT: diff --git a/linux-user/arm/signal.c b/linux-user/arm/signal.c index ed144f9455..df9f8e8eb2 100644 --- a/linux-user/arm/signal.c +++ b/linux-user/arm/signal.c @@ -46,15 +46,7 @@ struct target_sigcontext { abi_ulong fault_address; }; -struct target_ucontext_v1 { - abi_ulong tuc_flags; - abi_ulong tuc_link; - target_stack_t tuc_stack; - struct target_sigcontext tuc_mcontext; - target_sigset_t tuc_sigmask; /* mask last for extensibility */ -}; - -struct target_ucontext_v2 { +struct target_ucontext { abi_ulong tuc_flags; abi_ulong tuc_link; target_stack_t tuc_stack; @@ -98,68 +90,30 @@ struct target_iwmmxt_sigframe { #define TARGET_VFP_MAGIC 0x56465001 #define TARGET_IWMMXT_MAGIC 0x12ef842a -struct sigframe_v1 +struct sigframe { - struct target_sigcontext sc; - abi_ulong extramask[TARGET_NSIG_WORDS-1]; + struct target_ucontext uc; abi_ulong retcode[4]; }; -struct sigframe_v2 -{ - struct target_ucontext_v2 uc; - abi_ulong retcode[4]; -}; - -struct rt_sigframe_v1 -{ - abi_ulong pinfo; - abi_ulong puc; - struct target_siginfo info; - struct target_ucontext_v1 uc; - abi_ulong retcode[4]; -}; - -struct rt_sigframe_v2 +struct rt_sigframe { struct target_siginfo info; - struct target_ucontext_v2 uc; - abi_ulong retcode[4]; + struct sigframe sig; }; +static abi_ptr sigreturn_fdpic_tramp; + /* - * For ARM syscalls, we encode the syscall number into the instruction. + * Up to 3 words of 'retcode' in the sigframe are code, + * with retcode[3] being used by fdpic for the function descriptor. + * This code is not actually executed, but is retained for ABI compat. + * + * We will create a table of 8 retcode variants in the sigtramp page. + * Let each table entry use 3 words. */ -#define SWI_SYS_SIGRETURN (0xef000000|(TARGET_NR_sigreturn + ARM_SYSCALL_BASE)) -#define SWI_SYS_RT_SIGRETURN (0xef000000|(TARGET_NR_rt_sigreturn + ARM_SYSCALL_BASE)) - -/* - * For Thumb syscalls, we pass the syscall number via r7. We therefore - * need two 16-bit instructions. - */ -#define SWI_THUMB_SIGRETURN (0xdf00 << 16 | 0x2700 | (TARGET_NR_sigreturn)) -#define SWI_THUMB_RT_SIGRETURN (0xdf00 << 16 | 0x2700 | (TARGET_NR_rt_sigreturn)) - -static const abi_ulong retcodes[4] = { - SWI_SYS_SIGRETURN, SWI_THUMB_SIGRETURN, - SWI_SYS_RT_SIGRETURN, SWI_THUMB_RT_SIGRETURN -}; - -/* - * Stub needed to make sure the FD register (r9) contains the right - * value. 
- */ -static const unsigned long sigreturn_fdpic_codes[3] = { - 0xe59fc004, /* ldr r12, [pc, #4] to read function descriptor */ - 0xe59c9004, /* ldr r9, [r12, #4] to setup GOT */ - 0xe59cf000 /* ldr pc, [r12] to jump into restorer */ -}; - -static const unsigned long sigreturn_fdpic_thumb_codes[3] = { - 0xc008f8df, /* ldr r12, [pc, #8] to read function descriptor */ - 0x9004f8dc, /* ldr r9, [r12, #4] to setup GOT */ - 0xf000f8dc /* ldr pc, [r12] to jump into restorer */ -}; +#define RETCODE_WORDS 3 +#define RETCODE_BYTES (RETCODE_WORDS * 4) static inline int valid_user_regs(CPUARMState *regs) { @@ -207,15 +161,15 @@ get_sigframe(struct target_sigaction *ka, CPUARMState *regs, int framesize) } static int -setup_return(CPUARMState *env, struct target_sigaction *ka, - abi_ulong *rc, abi_ulong frame_addr, int usig, abi_ulong rc_addr) +setup_return(CPUARMState *env, struct target_sigaction *ka, int usig, + struct sigframe *frame, abi_ulong sp_addr) { abi_ulong handler = 0; abi_ulong handler_fdpic_GOT = 0; abi_ulong retcode; - - int thumb; + int thumb, retcode_idx; int is_fdpic = info_is_fdpic(((TaskState *)thread_cpu->opaque)->info); + bool copy_retcode; if (is_fdpic) { /* In FDPIC mode, ka->_sa_handler points to a function @@ -232,6 +186,7 @@ setup_return(CPUARMState *env, struct target_sigaction *ka, } thumb = handler & 1; + retcode_idx = thumb + (ka->sa_flags & TARGET_SA_SIGINFO ? 2 : 0); uint32_t cpsr = cpsr_read(env); @@ -249,44 +204,29 @@ setup_return(CPUARMState *env, struct target_sigaction *ka, if (ka->sa_flags & TARGET_SA_RESTORER) { if (is_fdpic) { - /* For FDPIC we ensure that the restorer is called with a - * correct r9 value. For that we need to write code on - * the stack that sets r9 and jumps back to restorer - * value. - */ - if (thumb) { - __put_user(sigreturn_fdpic_thumb_codes[0], rc); - __put_user(sigreturn_fdpic_thumb_codes[1], rc + 1); - __put_user(sigreturn_fdpic_thumb_codes[2], rc + 2); - __put_user((abi_ulong)ka->sa_restorer, rc + 3); - } else { - __put_user(sigreturn_fdpic_codes[0], rc); - __put_user(sigreturn_fdpic_codes[1], rc + 1); - __put_user(sigreturn_fdpic_codes[2], rc + 2); - __put_user((abi_ulong)ka->sa_restorer, rc + 3); - } - - retcode = rc_addr + thumb; + __put_user((abi_ulong)ka->sa_restorer, &frame->retcode[3]); + retcode = (sigreturn_fdpic_tramp + + retcode_idx * RETCODE_BYTES + thumb); + copy_retcode = true; } else { retcode = ka->sa_restorer; + copy_retcode = false; } } else { - unsigned int idx = thumb; + retcode = default_sigreturn + retcode_idx * RETCODE_BYTES + thumb; + copy_retcode = true; + } - if (ka->sa_flags & TARGET_SA_SIGINFO) { - idx += 2; - } - - __put_user(retcodes[idx], rc); - - retcode = rc_addr + thumb; + /* Copy the code to the stack slot for ABI compatibility. */ + if (copy_retcode) { + memcpy(frame->retcode, g2h_untagged(retcode & ~1), RETCODE_BYTES); } env->regs[0] = usig; if (is_fdpic) { env->regs[9] = handler_fdpic_GOT; } - env->regs[13] = frame_addr; + env->regs[13] = sp_addr; env->regs[14] = retcode; env->regs[15] = handler & (thumb ? 
~1 : ~3); cpsr_write(env, cpsr, CPSR_IT | CPSR_T | CPSR_E, CPSRWriteByInstr); @@ -294,7 +234,7 @@ setup_return(CPUARMState *env, struct target_sigaction *ka, return 0; } -static abi_ulong *setup_sigframe_v2_vfp(abi_ulong *regspace, CPUARMState *env) +static abi_ulong *setup_sigframe_vfp(abi_ulong *regspace, CPUARMState *env) { int i; struct target_vfp_sigframe *vfpframe; @@ -311,8 +251,7 @@ static abi_ulong *setup_sigframe_v2_vfp(abi_ulong *regspace, CPUARMState *env) return (abi_ulong*)(vfpframe+1); } -static abi_ulong *setup_sigframe_v2_iwmmxt(abi_ulong *regspace, - CPUARMState *env) +static abi_ulong *setup_sigframe_iwmmxt(abi_ulong *regspace, CPUARMState *env) { int i; struct target_iwmmxt_sigframe *iwmmxtframe; @@ -331,15 +270,15 @@ static abi_ulong *setup_sigframe_v2_iwmmxt(abi_ulong *regspace, return (abi_ulong*)(iwmmxtframe+1); } -static void setup_sigframe_v2(struct target_ucontext_v2 *uc, - target_sigset_t *set, CPUARMState *env) +static void setup_sigframe(struct target_ucontext *uc, + target_sigset_t *set, CPUARMState *env) { struct target_sigaltstack stack; int i; abi_ulong *regspace; /* Clear all the bits of the ucontext we don't use. */ - memset(uc, 0, offsetof(struct target_ucontext_v2, tuc_mcontext)); + memset(uc, 0, offsetof(struct target_ucontext, tuc_mcontext)); memset(&stack, 0, sizeof(stack)); target_save_altstack(&stack, env); @@ -349,10 +288,10 @@ static void setup_sigframe_v2(struct target_ucontext_v2 *uc, /* Save coprocessor signal frame. */ regspace = uc->tuc_regspace; if (cpu_isar_feature(aa32_vfp_simd, env_archcpu(env))) { - regspace = setup_sigframe_v2_vfp(regspace, env); + regspace = setup_sigframe_vfp(regspace, env); } if (arm_feature(env, ARM_FEATURE_IWMMXT)) { - regspace = setup_sigframe_v2_iwmmxt(regspace, env); + regspace = setup_sigframe_iwmmxt(regspace, env); } /* Write terminating magic word */ @@ -363,148 +302,23 @@ static void setup_sigframe_v2(struct target_ucontext_v2 *uc, } } -/* compare linux/arch/arm/kernel/signal.c:setup_frame() */ -static void setup_frame_v1(int usig, struct target_sigaction *ka, - target_sigset_t *set, CPUARMState *regs) -{ - struct sigframe_v1 *frame; - abi_ulong frame_addr = get_sigframe(ka, regs, sizeof(*frame)); - int i; - - trace_user_setup_frame(regs, frame_addr); - if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { - goto sigsegv; - } - - setup_sigcontext(&frame->sc, regs, set->sig[0]); - - for(i = 1; i < TARGET_NSIG_WORDS; i++) { - __put_user(set->sig[i], &frame->extramask[i - 1]); - } - - if (setup_return(regs, ka, frame->retcode, frame_addr, usig, - frame_addr + offsetof(struct sigframe_v1, retcode))) { - goto sigsegv; - } - - unlock_user_struct(frame, frame_addr, 1); - return; -sigsegv: - unlock_user_struct(frame, frame_addr, 1); - force_sigsegv(usig); -} - -static void setup_frame_v2(int usig, struct target_sigaction *ka, - target_sigset_t *set, CPUARMState *regs) -{ - struct sigframe_v2 *frame; - abi_ulong frame_addr = get_sigframe(ka, regs, sizeof(*frame)); - - trace_user_setup_frame(regs, frame_addr); - if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { - goto sigsegv; - } - - setup_sigframe_v2(&frame->uc, set, regs); - - if (setup_return(regs, ka, frame->retcode, frame_addr, usig, - frame_addr + offsetof(struct sigframe_v2, retcode))) { - goto sigsegv; - } - - unlock_user_struct(frame, frame_addr, 1); - return; -sigsegv: - unlock_user_struct(frame, frame_addr, 1); - force_sigsegv(usig); -} - void setup_frame(int usig, struct target_sigaction *ka, target_sigset_t *set, CPUARMState *regs) { 
- if (get_osversion() >= 0x020612) { - setup_frame_v2(usig, ka, set, regs); - } else { - setup_frame_v1(usig, ka, set, regs); - } -} + struct sigframe *frame; + abi_ulong frame_addr = get_sigframe(ka, regs, sizeof(*frame)); -/* compare linux/arch/arm/kernel/signal.c:setup_rt_frame() */ -static void setup_rt_frame_v1(int usig, struct target_sigaction *ka, - target_siginfo_t *info, - target_sigset_t *set, CPUARMState *env) -{ - struct rt_sigframe_v1 *frame; - abi_ulong frame_addr = get_sigframe(ka, env, sizeof(*frame)); - struct target_sigaltstack stack; - int i; - abi_ulong info_addr, uc_addr; - - trace_user_setup_rt_frame(env, frame_addr); + trace_user_setup_frame(regs, frame_addr); if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { goto sigsegv; } - info_addr = frame_addr + offsetof(struct rt_sigframe_v1, info); - __put_user(info_addr, &frame->pinfo); - uc_addr = frame_addr + offsetof(struct rt_sigframe_v1, uc); - __put_user(uc_addr, &frame->puc); - tswap_siginfo(&frame->info, info); + setup_sigframe(&frame->uc, set, regs); - /* Clear all the bits of the ucontext we don't use. */ - memset(&frame->uc, 0, offsetof(struct target_ucontext_v1, tuc_mcontext)); - - memset(&stack, 0, sizeof(stack)); - target_save_altstack(&stack, env); - memcpy(&frame->uc.tuc_stack, &stack, sizeof(stack)); - - setup_sigcontext(&frame->uc.tuc_mcontext, env, set->sig[0]); - for(i = 0; i < TARGET_NSIG_WORDS; i++) { - __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]); - } - - if (setup_return(env, ka, frame->retcode, frame_addr, usig, - frame_addr + offsetof(struct rt_sigframe_v1, retcode))) { + if (setup_return(regs, ka, usig, frame, frame_addr)) { goto sigsegv; } - env->regs[1] = info_addr; - env->regs[2] = uc_addr; - - unlock_user_struct(frame, frame_addr, 1); - return; -sigsegv: - unlock_user_struct(frame, frame_addr, 1); - force_sigsegv(usig); -} - -static void setup_rt_frame_v2(int usig, struct target_sigaction *ka, - target_siginfo_t *info, - target_sigset_t *set, CPUARMState *env) -{ - struct rt_sigframe_v2 *frame; - abi_ulong frame_addr = get_sigframe(ka, env, sizeof(*frame)); - abi_ulong info_addr, uc_addr; - - trace_user_setup_rt_frame(env, frame_addr); - if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { - goto sigsegv; - } - - info_addr = frame_addr + offsetof(struct rt_sigframe_v2, info); - uc_addr = frame_addr + offsetof(struct rt_sigframe_v2, uc); - tswap_siginfo(&frame->info, info); - - setup_sigframe_v2(&frame->uc, set, env); - - if (setup_return(env, ka, frame->retcode, frame_addr, usig, - frame_addr + offsetof(struct rt_sigframe_v2, retcode))) { - goto sigsegv; - } - - env->regs[1] = info_addr; - env->regs[2] = uc_addr; - unlock_user_struct(frame, frame_addr, 1); return; sigsegv: @@ -516,11 +330,33 @@ void setup_rt_frame(int usig, struct target_sigaction *ka, target_siginfo_t *info, target_sigset_t *set, CPUARMState *env) { - if (get_osversion() >= 0x020612) { - setup_rt_frame_v2(usig, ka, info, set, env); - } else { - setup_rt_frame_v1(usig, ka, info, set, env); + struct rt_sigframe *frame; + abi_ulong frame_addr = get_sigframe(ka, env, sizeof(*frame)); + abi_ulong info_addr, uc_addr; + + trace_user_setup_rt_frame(env, frame_addr); + if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { + goto sigsegv; } + + info_addr = frame_addr + offsetof(struct rt_sigframe, info); + uc_addr = frame_addr + offsetof(struct rt_sigframe, sig.uc); + tswap_siginfo(&frame->info, info); + + setup_sigframe(&frame->sig.uc, set, env); + + if (setup_return(env, ka, usig, &frame->sig, 
frame_addr)) { + goto sigsegv; + } + + env->regs[1] = info_addr; + env->regs[2] = uc_addr; + + unlock_user_struct(frame, frame_addr, 1); + return; +sigsegv: + unlock_user_struct(frame, frame_addr, 1); + force_sigsegv(usig); } static int @@ -553,55 +389,7 @@ restore_sigcontext(CPUARMState *env, struct target_sigcontext *sc) return err; } -static long do_sigreturn_v1(CPUARMState *env) -{ - abi_ulong frame_addr; - struct sigframe_v1 *frame = NULL; - target_sigset_t set; - sigset_t host_set; - int i; - - /* - * Since we stacked the signal on a 64-bit boundary, - * then 'sp' should be word aligned here. If it's - * not, then the user is trying to mess with us. - */ - frame_addr = env->regs[13]; - trace_user_do_sigreturn(env, frame_addr); - if (frame_addr & 7) { - goto badframe; - } - - if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { - goto badframe; - } - - __get_user(set.sig[0], &frame->sc.oldmask); - for(i = 1; i < TARGET_NSIG_WORDS; i++) { - __get_user(set.sig[i], &frame->extramask[i - 1]); - } - - target_to_host_sigset_internal(&host_set, &set); - set_sigmask(&host_set); - - if (restore_sigcontext(env, &frame->sc)) { - goto badframe; - } - -#if 0 - /* Send SIGTRAP if we're single-stepping */ - if (ptrace_cancel_bpt(current)) - send_sig(SIGTRAP, current, 1); -#endif - unlock_user_struct(frame, frame_addr, 0); - return -TARGET_QEMU_ESIGRETURN; - -badframe: - force_sig(TARGET_SIGSEGV); - return -TARGET_QEMU_ESIGRETURN; -} - -static abi_ulong *restore_sigframe_v2_vfp(CPUARMState *env, abi_ulong *regspace) +static abi_ulong *restore_sigframe_vfp(CPUARMState *env, abi_ulong *regspace) { int i; abi_ulong magic, sz; @@ -631,8 +419,8 @@ static abi_ulong *restore_sigframe_v2_vfp(CPUARMState *env, abi_ulong *regspace) return (abi_ulong*)(vfpframe + 1); } -static abi_ulong *restore_sigframe_v2_iwmmxt(CPUARMState *env, - abi_ulong *regspace) +static abi_ulong *restore_sigframe_iwmmxt(CPUARMState *env, + abi_ulong *regspace) { int i; abi_ulong magic, sz; @@ -656,9 +444,9 @@ static abi_ulong *restore_sigframe_v2_iwmmxt(CPUARMState *env, return (abi_ulong*)(iwmmxtframe + 1); } -static int do_sigframe_return_v2(CPUARMState *env, - target_ulong context_addr, - struct target_ucontext_v2 *uc) +static int do_sigframe_return(CPUARMState *env, + target_ulong context_addr, + struct target_ucontext *uc) { sigset_t host_set; abi_ulong *regspace; @@ -666,19 +454,20 @@ static int do_sigframe_return_v2(CPUARMState *env, target_to_host_sigset(&host_set, &uc->tuc_sigmask); set_sigmask(&host_set); - if (restore_sigcontext(env, &uc->tuc_mcontext)) + if (restore_sigcontext(env, &uc->tuc_mcontext)) { return 1; + } /* Restore coprocessor signal frame */ regspace = uc->tuc_regspace; if (cpu_isar_feature(aa32_vfp_simd, env_archcpu(env))) { - regspace = restore_sigframe_v2_vfp(env, regspace); + regspace = restore_sigframe_vfp(env, regspace); if (!regspace) { return 1; } } if (arm_feature(env, ARM_FEATURE_IWMMXT)) { - regspace = restore_sigframe_v2_iwmmxt(env, regspace); + regspace = restore_sigframe_iwmmxt(env, regspace); if (!regspace) { return 1; } @@ -695,10 +484,10 @@ static int do_sigframe_return_v2(CPUARMState *env, return 0; } -static long do_sigreturn_v2(CPUARMState *env) +long do_sigreturn(CPUARMState *env) { abi_ulong frame_addr; - struct sigframe_v2 *frame = NULL; + struct sigframe *frame = NULL; /* * Since we stacked the signal on a 64-bit boundary, @@ -715,99 +504,9 @@ static long do_sigreturn_v2(CPUARMState *env) goto badframe; } - if (do_sigframe_return_v2(env, - frame_addr - + offsetof(struct 
sigframe_v2, uc), - &frame->uc)) { - goto badframe; - } - - unlock_user_struct(frame, frame_addr, 0); - return -TARGET_QEMU_ESIGRETURN; - -badframe: - unlock_user_struct(frame, frame_addr, 0); - force_sig(TARGET_SIGSEGV); - return -TARGET_QEMU_ESIGRETURN; -} - -long do_sigreturn(CPUARMState *env) -{ - if (get_osversion() >= 0x020612) { - return do_sigreturn_v2(env); - } else { - return do_sigreturn_v1(env); - } -} - -static long do_rt_sigreturn_v1(CPUARMState *env) -{ - abi_ulong frame_addr; - struct rt_sigframe_v1 *frame = NULL; - sigset_t host_set; - - /* - * Since we stacked the signal on a 64-bit boundary, - * then 'sp' should be word aligned here. If it's - * not, then the user is trying to mess with us. - */ - frame_addr = env->regs[13]; - trace_user_do_rt_sigreturn(env, frame_addr); - if (frame_addr & 7) { - goto badframe; - } - - if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { - goto badframe; - } - - target_to_host_sigset(&host_set, &frame->uc.tuc_sigmask); - set_sigmask(&host_set); - - if (restore_sigcontext(env, &frame->uc.tuc_mcontext)) { - goto badframe; - } - - target_restore_altstack(&frame->uc.tuc_stack, env); - -#if 0 - /* Send SIGTRAP if we're single-stepping */ - if (ptrace_cancel_bpt(current)) - send_sig(SIGTRAP, current, 1); -#endif - unlock_user_struct(frame, frame_addr, 0); - return -TARGET_QEMU_ESIGRETURN; - -badframe: - unlock_user_struct(frame, frame_addr, 0); - force_sig(TARGET_SIGSEGV); - return -TARGET_QEMU_ESIGRETURN; -} - -static long do_rt_sigreturn_v2(CPUARMState *env) -{ - abi_ulong frame_addr; - struct rt_sigframe_v2 *frame = NULL; - - /* - * Since we stacked the signal on a 64-bit boundary, - * then 'sp' should be word aligned here. If it's - * not, then the user is trying to mess with us. - */ - frame_addr = env->regs[13]; - trace_user_do_rt_sigreturn(env, frame_addr); - if (frame_addr & 7) { - goto badframe; - } - - if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { - goto badframe; - } - - if (do_sigframe_return_v2(env, - frame_addr - + offsetof(struct rt_sigframe_v2, uc), - &frame->uc)) { + if (do_sigframe_return(env, + frame_addr + offsetof(struct sigframe, uc), + &frame->uc)) { goto badframe; } @@ -822,9 +521,110 @@ badframe: long do_rt_sigreturn(CPUARMState *env) { - if (get_osversion() >= 0x020612) { - return do_rt_sigreturn_v2(env); - } else { - return do_rt_sigreturn_v1(env); + abi_ulong frame_addr; + struct rt_sigframe *frame = NULL; + + /* + * Since we stacked the signal on a 64-bit boundary, + * then 'sp' should be word aligned here. If it's + * not, then the user is trying to mess with us. + */ + frame_addr = env->regs[13]; + trace_user_do_rt_sigreturn(env, frame_addr); + if (frame_addr & 7) { + goto badframe; } + + if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { + goto badframe; + } + + if (do_sigframe_return(env, + frame_addr + offsetof(struct rt_sigframe, sig.uc), + &frame->sig.uc)) { + goto badframe; + } + + unlock_user_struct(frame, frame_addr, 0); + return -TARGET_QEMU_ESIGRETURN; + +badframe: + unlock_user_struct(frame, frame_addr, 0); + force_sig(TARGET_SIGSEGV); + return -TARGET_QEMU_ESIGRETURN; +} + +/* + * EABI syscalls pass the number via r7. + * Note that the kernel still adds the OABI syscall number to the trap, + * presumably for backward ABI compatibility with unwinders. 
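+ *
+ * Editor's illustration (not part of the original patch): given the macros
+ * that follow, write_arm_sigreturn() emits the two-instruction sequence
+ *     mov   r7, #nr                      @ ARM_MOV_R7_IMM(nr)
+ *     swi   #(ARM_SYSCALL_BASE + nr)     @ ARM_SWI_SYS(nr)
+ * while write_thm_sigreturn() packs the Thumb pair "movs r7, #nr; swi 0"
+ * into a single 32-bit word.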
+ */ +#define ARM_MOV_R7_IMM(X) (0xe3a07000 | (X)) +#define ARM_SWI_SYS(X) (0xef000000 | (X) | ARM_SYSCALL_BASE) + +#define THUMB_MOVS_R7_IMM(X) (0x2700 | (X)) +#define THUMB_SWI_SYS 0xdf00 + +static void write_arm_sigreturn(uint32_t *rc, int syscall) +{ + __put_user(ARM_MOV_R7_IMM(syscall), rc); + __put_user(ARM_SWI_SYS(syscall), rc + 1); + /* Wrote 8 of 12 bytes */ +} + +static void write_thm_sigreturn(uint32_t *rc, int syscall) +{ + __put_user(THUMB_SWI_SYS << 16 | THUMB_MOVS_R7_IMM(syscall), rc); + /* Wrote 4 of 12 bytes */ +} + +/* + * Stub needed to make sure the FD register (r9) contains the right value. + * Use the same instruction sequence as the kernel. + */ +static void write_arm_fdpic_sigreturn(uint32_t *rc, int ofs) +{ + assert(ofs <= 0xfff); + __put_user(0xe59d3000 | ofs, rc + 0); /* ldr r3, [sp, #ofs] */ + __put_user(0xe8930908, rc + 1); /* ldm r3, { r3, r9 } */ + __put_user(0xe12fff13, rc + 2); /* bx r3 */ + /* Wrote 12 of 12 bytes */ +} + +static void write_thm_fdpic_sigreturn(void *vrc, int ofs) +{ + uint16_t *rc = vrc; + + assert((ofs & ~0x3fc) == 0); + __put_user(0x9b00 | (ofs >> 2), rc + 0); /* ldr r3, [sp, #ofs] */ + __put_user(0xcb0c, rc + 1); /* ldm r3, { r2, r3 } */ + __put_user(0x4699, rc + 2); /* mov r9, r3 */ + __put_user(0x4710, rc + 3); /* bx r2 */ + /* Wrote 8 of 12 bytes */ +} + +void setup_sigtramp(abi_ulong sigtramp_page) +{ + uint32_t total_size = 8 * RETCODE_BYTES; + uint32_t *tramp = lock_user(VERIFY_WRITE, sigtramp_page, total_size, 0); + + assert(tramp != NULL); + + default_sigreturn = sigtramp_page; + write_arm_sigreturn(&tramp[0 * RETCODE_WORDS], TARGET_NR_sigreturn); + write_thm_sigreturn(&tramp[1 * RETCODE_WORDS], TARGET_NR_sigreturn); + write_arm_sigreturn(&tramp[2 * RETCODE_WORDS], TARGET_NR_rt_sigreturn); + write_thm_sigreturn(&tramp[3 * RETCODE_WORDS], TARGET_NR_rt_sigreturn); + + sigreturn_fdpic_tramp = sigtramp_page + 4 * RETCODE_BYTES; + write_arm_fdpic_sigreturn(tramp + 4 * RETCODE_WORDS, + offsetof(struct sigframe, retcode[3])); + write_thm_fdpic_sigreturn(tramp + 5 * RETCODE_WORDS, + offsetof(struct sigframe, retcode[3])); + write_arm_fdpic_sigreturn(tramp + 6 * RETCODE_WORDS, + offsetof(struct rt_sigframe, sig.retcode[3])); + write_thm_fdpic_sigreturn(tramp + 7 * RETCODE_WORDS, + offsetof(struct rt_sigframe, sig.retcode[3])); + + unlock_user(tramp, sigtramp_page, total_size); } diff --git a/linux-user/arm/target_signal.h b/linux-user/arm/target_signal.h index 0998dd6dfa..1e7fb0cecb 100644 --- a/linux-user/arm/target_signal.h +++ b/linux-user/arm/target_signal.h @@ -22,4 +22,6 @@ typedef struct target_sigaltstack { #include "../generic/signal.h" #define TARGET_ARCH_HAS_SETUP_FRAME +#define TARGET_ARCH_HAS_SIGTRAMP_PAGE 1 + #endif /* ARM_TARGET_SIGNAL_H */ diff --git a/linux-user/cris/cpu_loop.c b/linux-user/cris/cpu_loop.c index b9085619c4..0d5d268609 100644 --- a/linux-user/cris/cpu_loop.c +++ b/linux-user/cris/cpu_loop.c @@ -37,16 +37,6 @@ void cpu_loop(CPUCRISState *env) process_queued_cpu_work(cs); switch (trapnr) { - case 0xaa: - { - info.si_signo = TARGET_SIGSEGV; - info.si_errno = 0; - /* XXX: check env->error_code */ - info.si_code = TARGET_SEGV_MAPERR; - info._sifields._sigfault._addr = env->pregs[PR_EDA]; - queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info); - } - break; case EXCP_INTERRUPT: /* just indicate that signals should be handled asap */ break; diff --git a/linux-user/cris/signal.c b/linux-user/cris/signal.c index 2c39bdf727..7f6aca934e 100644 --- a/linux-user/cris/signal.c +++ b/linux-user/cris/signal.c @@ -97,6 
+97,14 @@ static abi_ulong get_sigframe(CPUCRISState *env, int framesize) return sp - framesize; } +static void setup_sigreturn(uint16_t *retcode) +{ + /* This is movu.w __NR_sigreturn, r9; break 13; */ + __put_user(0x9c5f, retcode + 0); + __put_user(TARGET_NR_sigreturn, retcode + 1); + __put_user(0xe93d, retcode + 2); +} + void setup_frame(int sig, struct target_sigaction *ka, target_sigset_t *set, CPUCRISState *env) { @@ -112,14 +120,8 @@ void setup_frame(int sig, struct target_sigaction *ka, /* * The CRIS signal return trampoline. A real linux/CRIS kernel doesn't * use this trampoline anymore but it sets it up for GDB. - * In QEMU, using the trampoline simplifies things a bit so we use it. - * - * This is movu.w __NR_sigreturn, r9; break 13; */ - __put_user(0x9c5f, frame->retcode+0); - __put_user(TARGET_NR_sigreturn, - frame->retcode + 1); - __put_user(0xe93d, frame->retcode + 2); + setup_sigreturn(frame->retcode); /* Save the mask. */ __put_user(set->sig[0], &frame->sc.oldmask); @@ -135,7 +137,7 @@ void setup_frame(int sig, struct target_sigaction *ka, env->regs[10] = sig; env->pc = (unsigned long) ka->_sa_handler; /* Link SRP so the guest returns through the trampoline. */ - env->pregs[PR_SRP] = frame_addr + offsetof(typeof(*frame), retcode); + env->pregs[PR_SRP] = default_sigreturn; unlock_user_struct(frame, frame_addr, 1); return; @@ -187,3 +189,14 @@ long do_rt_sigreturn(CPUCRISState *env) qemu_log_mask(LOG_UNIMP, "do_rt_sigreturn: not implemented\n"); return -TARGET_ENOSYS; } + +void setup_sigtramp(abi_ulong sigtramp_page) +{ + uint16_t *tramp = lock_user(VERIFY_WRITE, sigtramp_page, 6, 0); + assert(tramp != NULL); + + default_sigreturn = sigtramp_page; + setup_sigreturn(tramp); + + unlock_user(tramp, sigtramp_page, 6); +} diff --git a/linux-user/cris/target_signal.h b/linux-user/cris/target_signal.h index 495a142896..83a5155507 100644 --- a/linux-user/cris/target_signal.h +++ b/linux-user/cris/target_signal.h @@ -22,4 +22,6 @@ typedef struct target_sigaltstack { #include "../generic/signal.h" #define TARGET_ARCH_HAS_SETUP_FRAME +#define TARGET_ARCH_HAS_SIGTRAMP_PAGE 1 + #endif /* CRIS_TARGET_SIGNAL_H */ diff --git a/linux-user/elfload.c b/linux-user/elfload.c index 5f9e2141ad..5da8c02d08 100644 --- a/linux-user/elfload.c +++ b/linux-user/elfload.c @@ -7,6 +7,7 @@ #include "qemu.h" #include "user-internals.h" +#include "signal-common.h" #include "loader.h" #include "user-mmap.h" #include "disas/disas.h" @@ -17,6 +18,7 @@ #include "qemu/units.h" #include "qemu/selfmap.h" #include "qapi/error.h" +#include "target_signal.h" #ifdef _ARCH_PPC64 #undef ARCH_DLINFO @@ -899,7 +901,7 @@ static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUPPCState *en (*regs)[33] = tswapreg(env->msr); (*regs)[35] = tswapreg(env->ctr); (*regs)[36] = tswapreg(env->lr); - (*regs)[37] = tswapreg(env->xer); + (*regs)[37] = tswapreg(cpu_read_xer(env)); for (i = 0; i < ARRAY_SIZE(env->crf); i++) { ccr |= env->crf[i] << (32 - ((i + 1) * 4)); @@ -923,8 +925,6 @@ static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUPPCState *en #endif #define ELF_ARCH EM_MIPS -#define elf_check_arch(x) ((x) == EM_MIPS || (x) == EM_NANOMIPS) - #ifdef TARGET_ABI_MIPSN32 #define elf_check_abi(x) ((x) & EF_MIPS_ABI2) #else @@ -1446,7 +1446,7 @@ static uint32_t get_elf_hwcap(void) uint32_t mask = MISA_BIT('I') | MISA_BIT('M') | MISA_BIT('A') | MISA_BIT('F') | MISA_BIT('D') | MISA_BIT('C'); - return cpu->env.misa & mask; + return cpu->env.misa_ext & mask; #undef MISA_BIT } @@ -3249,6 +3249,18 @@ int 
load_elf_binary(struct linux_binprm *bprm, struct image_info *info) #endif } + /* + * TODO: load a vdso, which would also contain the signal trampolines. + * Otherwise, allocate a private page to hold them. + */ + if (TARGET_ARCH_HAS_SIGTRAMP_PAGE) { + abi_ulong tramp_page = target_mmap(0, TARGET_PAGE_SIZE, + PROT_READ | PROT_WRITE, + MAP_PRIVATE | MAP_ANON, -1, 0); + setup_sigtramp(tramp_page); + target_mprotect(tramp_page, TARGET_PAGE_SIZE, PROT_READ | PROT_EXEC); + } + bprm->p = create_elf_tables(bprm->p, bprm->argc, bprm->envc, &elf_ex, info, (elf_interpreter ? &interp_info : NULL)); info->start_stack = bprm->p; diff --git a/linux-user/hexagon/cpu_loop.c b/linux-user/hexagon/cpu_loop.c index bee2a9e4ea..6b24cbaba9 100644 --- a/linux-user/hexagon/cpu_loop.c +++ b/linux-user/hexagon/cpu_loop.c @@ -28,8 +28,7 @@ void cpu_loop(CPUHexagonState *env) { CPUState *cs = env_cpu(env); - int trapnr, signum, sigcode; - target_ulong sigaddr; + int trapnr; target_ulong syscallnum; target_ulong ret; @@ -39,10 +38,6 @@ void cpu_loop(CPUHexagonState *env) cpu_exec_end(cs); process_queued_cpu_work(cs); - signum = 0; - sigcode = 0; - sigaddr = 0; - switch (trapnr) { case EXCP_INTERRUPT: /* just indicate that signals should be handled asap */ @@ -65,12 +60,6 @@ void cpu_loop(CPUHexagonState *env) env->gpr[0] = ret; } break; - case HEX_EXCP_FETCH_NO_UPAGE: - case HEX_EXCP_PRIV_NO_UREAD: - case HEX_EXCP_PRIV_NO_UWRITE: - signum = TARGET_SIGSEGV; - sigcode = TARGET_SEGV_MAPERR; - break; case EXCP_ATOMIC: cpu_exec_step_atomic(cs); break; @@ -79,17 +68,6 @@ void cpu_loop(CPUHexagonState *env) trapnr); exit(EXIT_FAILURE); } - - if (signum) { - target_siginfo_t info = { - .si_signo = signum, - .si_errno = 0, - .si_code = sigcode, - ._sifields._sigfault._addr = sigaddr - }; - queue_signal(env, info.si_signo, QEMU_SI_KILL, &info); - } - process_pending_signals(env); } } diff --git a/linux-user/hexagon/signal.c b/linux-user/hexagon/signal.c index c7f0bf6b92..74e61739a0 100644 --- a/linux-user/hexagon/signal.c +++ b/linux-user/hexagon/signal.c @@ -162,6 +162,11 @@ void setup_rt_frame(int sig, struct target_sigaction *ka, setup_ucontext(&frame->uc, env, set); tswap_siginfo(&frame->info, info); + /* + * The on-stack signal trampoline is no longer executed; + * however, the libgcc signal frame unwinding code checks + * for the presence of these two numeric magic values. 
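+ *
+ * Editor's illustration (not part of the original patch): install_sigtramp()
+ * therefore still writes the trampoline words into frame->tramp for the
+ * unwinder to find, but the return address in LR is redirected below to
+ * default_rt_sigreturn on the shared sigtramp page.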
+ */ install_sigtramp(frame->tramp); env->gpr[HEX_REG_PC] = ka->_sa_handler; @@ -171,8 +176,7 @@ void setup_rt_frame(int sig, struct target_sigaction *ka, frame_addr + offsetof(struct target_rt_sigframe, info); env->gpr[HEX_REG_R02] = frame_addr + offsetof(struct target_rt_sigframe, uc); - env->gpr[HEX_REG_LR] = - frame_addr + offsetof(struct target_rt_sigframe, tramp); + env->gpr[HEX_REG_LR] = default_rt_sigreturn; return; @@ -271,3 +275,14 @@ badframe: force_sig(TARGET_SIGSEGV); return 0; } + +void setup_sigtramp(abi_ulong sigtramp_page) +{ + uint32_t *tramp = lock_user(VERIFY_WRITE, sigtramp_page, 4 * 2, 0); + assert(tramp != NULL); + + default_rt_sigreturn = sigtramp_page; + install_sigtramp(tramp); + + unlock_user(tramp, sigtramp_page, 4 * 2); +} diff --git a/linux-user/hexagon/target_signal.h b/linux-user/hexagon/target_signal.h index 345cf1cbb8..9e0223d322 100644 --- a/linux-user/hexagon/target_signal.h +++ b/linux-user/hexagon/target_signal.h @@ -31,4 +31,6 @@ typedef struct target_sigaltstack { #include "../generic/signal.h" +#define TARGET_ARCH_HAS_SIGTRAMP_PAGE 1 + #endif /* TARGET_SIGNAL_H */ diff --git a/linux-user/host/aarch64/host-signal.h b/linux-user/host/aarch64/host-signal.h new file mode 100644 index 0000000000..0c0b08383a --- /dev/null +++ b/linux-user/host/aarch64/host-signal.h @@ -0,0 +1,74 @@ +/* + * host-signal.h: signal info dependent on the host architecture + * + * Copyright (c) 2003-2005 Fabrice Bellard + * Copyright (c) 2021 Linaro Limited + * + * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. + * See the COPYING file in the top-level directory. + */ + +#ifndef AARCH64_HOST_SIGNAL_H +#define AARCH64_HOST_SIGNAL_H + +/* Pre-3.16 kernel headers don't have these, so provide fallback definitions */ +#ifndef ESR_MAGIC +#define ESR_MAGIC 0x45535201 +struct esr_context { + struct _aarch64_ctx head; + uint64_t esr; +}; +#endif + +static inline struct _aarch64_ctx *first_ctx(ucontext_t *uc) +{ + return (struct _aarch64_ctx *)&uc->uc_mcontext.__reserved; +} + +static inline struct _aarch64_ctx *next_ctx(struct _aarch64_ctx *hdr) +{ + return (struct _aarch64_ctx *)((char *)hdr + hdr->size); +} + +static inline uintptr_t host_signal_pc(ucontext_t *uc) +{ + return uc->uc_mcontext.pc; +} + +static inline bool host_signal_write(siginfo_t *info, ucontext_t *uc) +{ + struct _aarch64_ctx *hdr; + uint32_t insn; + + /* Find the esr_context, which has the WnR bit in it */ + for (hdr = first_ctx(uc); hdr->magic; hdr = next_ctx(hdr)) { + if (hdr->magic == ESR_MAGIC) { + struct esr_context const *ec = (struct esr_context const *)hdr; + uint64_t esr = ec->esr; + + /* For data aborts ESR.EC is 0b10010x: then bit 6 is the WnR bit */ + return extract32(esr, 27, 5) == 0x12 && extract32(esr, 6, 1) == 1; + } + } + + /* + * Fall back to parsing instructions; will only be needed + * for really ancient (pre-3.16) kernels. + */ + insn = *(uint32_t *)host_signal_pc(uc); + + return (insn & 0xbfff0000) == 0x0c000000 /* C3.3.1 */ + || (insn & 0xbfe00000) == 0x0c800000 /* C3.3.2 */ + || (insn & 0xbfdf0000) == 0x0d000000 /* C3.3.3 */ + || (insn & 0xbfc00000) == 0x0d800000 /* C3.3.4 */ + || (insn & 0x3f400000) == 0x08000000 /* C3.3.6 */ + || (insn & 0x3bc00000) == 0x39000000 /* C3.3.13 */ + || (insn & 0x3fc00000) == 0x3d800000 /* ... 128bit */ + /* Ignore bits 10, 11 & 21, controlling indexing. */ + || (insn & 0x3bc00000) == 0x38000000 /* C3.3.8-12 */ + || (insn & 0x3fe00000) == 0x3c800000 /* ... 128bit */ + /* Ignore bits 23 & 24, controlling indexing. 
*/ + || (insn & 0x3a400000) == 0x28000000; /* C3.3.7,14-16 */ +} + +#endif diff --git a/linux-user/host/alpha/host-signal.h b/linux-user/host/alpha/host-signal.h new file mode 100644 index 0000000000..e080be412f --- /dev/null +++ b/linux-user/host/alpha/host-signal.h @@ -0,0 +1,42 @@ +/* + * host-signal.h: signal info dependent on the host architecture + * + * Copyright (c) 2003-2005 Fabrice Bellard + * Copyright (c) 2021 Linaro Limited + * + * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. + * See the COPYING file in the top-level directory. + */ + +#ifndef ALPHA_HOST_SIGNAL_H +#define ALPHA_HOST_SIGNAL_H + +static inline uintptr_t host_signal_pc(ucontext_t *uc) +{ + return uc->uc_mcontext.sc_pc; +} + +static inline bool host_signal_write(siginfo_t *info, ucontext_t *uc) +{ + uint32_t *pc = (uint32_t *)host_signal_pc(uc); + uint32_t insn = *pc; + + /* XXX: need kernel patch to get write flag faster */ + switch (insn >> 26) { + case 0x0d: /* stw */ + case 0x0e: /* stb */ + case 0x0f: /* stq_u */ + case 0x24: /* stf */ + case 0x25: /* stg */ + case 0x26: /* sts */ + case 0x27: /* stt */ + case 0x2c: /* stl */ + case 0x2d: /* stq */ + case 0x2e: /* stl_c */ + case 0x2f: /* stq_c */ + return true; + } + return false; +} + +#endif diff --git a/linux-user/host/arm/host-signal.h b/linux-user/host/arm/host-signal.h new file mode 100644 index 0000000000..efb165c0c5 --- /dev/null +++ b/linux-user/host/arm/host-signal.h @@ -0,0 +1,30 @@ +/* + * host-signal.h: signal info dependent on the host architecture + * + * Copyright (c) 2003-2005 Fabrice Bellard + * Copyright (c) 2021 Linaro Limited + * + * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. + * See the COPYING file in the top-level directory. + */ + +#ifndef ARM_HOST_SIGNAL_H +#define ARM_HOST_SIGNAL_H + +static inline uintptr_t host_signal_pc(ucontext_t *uc) +{ + return uc->uc_mcontext.arm_pc; +} + +static inline bool host_signal_write(siginfo_t *info, ucontext_t *uc) +{ + /* + * In the FSR, bit 11 is WnR, assuming a v6 or + * later processor. On v5 we will always report + * this as a read, which will fail later. + */ + uint32_t fsr = uc->uc_mcontext.error_code; + return extract32(fsr, 11, 1); +} + +#endif diff --git a/linux-user/host/i386/host-signal.h b/linux-user/host/i386/host-signal.h new file mode 100644 index 0000000000..4c8eef99ce --- /dev/null +++ b/linux-user/host/i386/host-signal.h @@ -0,0 +1,25 @@ +/* + * host-signal.h: signal info dependent on the host architecture + * + * Copyright (c) 2003-2005 Fabrice Bellard + * Copyright (c) 2021 Linaro Limited + * + * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. + * See the COPYING file in the top-level directory. + */ + +#ifndef I386_HOST_SIGNAL_H +#define I386_HOST_SIGNAL_H + +static inline uintptr_t host_signal_pc(ucontext_t *uc) +{ + return uc->uc_mcontext.gregs[REG_EIP]; +} + +static inline bool host_signal_write(siginfo_t *info, ucontext_t *uc) +{ + return uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe + && (uc->uc_mcontext.gregs[REG_ERR] & 0x2); +} + +#endif diff --git a/linux-user/host/mips/host-signal.h b/linux-user/host/mips/host-signal.h new file mode 100644 index 0000000000..ef341f7c20 --- /dev/null +++ b/linux-user/host/mips/host-signal.h @@ -0,0 +1,62 @@ +/* + * host-signal.h: signal info dependent on the host architecture + * + * Copyright (c) 2003-2005 Fabrice Bellard + * Copyright (c) 2021 Linaro Limited + * + * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. 
+ * See the COPYING file in the top-level directory. + */ + +#ifndef MIPS_HOST_SIGNAL_H +#define MIPS_HOST_SIGNAL_H + +static inline uintptr_t host_signal_pc(ucontext_t *uc) +{ + return uc->uc_mcontext.pc; +} + +#if defined(__mips16) || defined(__mips_micromips) +#error "Unsupported encoding" +#endif + +static inline bool host_signal_write(siginfo_t *info, ucontext_t *uc) +{ + uint32_t insn = *(uint32_t *)host_signal_pc(uc); + + /* Detect all store instructions at program counter. */ + switch ((insn >> 26) & 077) { + case 050: /* SB */ + case 051: /* SH */ + case 052: /* SWL */ + case 053: /* SW */ + case 054: /* SDL */ + case 055: /* SDR */ + case 056: /* SWR */ + case 070: /* SC */ + case 071: /* SWC1 */ + case 074: /* SCD */ + case 075: /* SDC1 */ + case 077: /* SD */ +#if !defined(__mips_isa_rev) || __mips_isa_rev < 6 + case 072: /* SWC2 */ + case 076: /* SDC2 */ +#endif + return true; + case 023: /* COP1X */ + /* + * Required in all versions of MIPS64 since + * MIPS64r1 and subsequent versions of MIPS32r2. + */ + switch (insn & 077) { + case 010: /* SWXC1 */ + case 011: /* SDXC1 */ + case 015: /* SUXC1 */ + return true; + } + break; + } + return false; +} + +#endif diff --git a/linux-user/host/ppc/host-signal.h b/linux-user/host/ppc/host-signal.h new file mode 100644 index 0000000000..a491c413dc --- /dev/null +++ b/linux-user/host/ppc/host-signal.h @@ -0,0 +1,25 @@ +/* + * host-signal.h: signal info dependent on the host architecture + * + * Copyright (c) 2003-2005 Fabrice Bellard + * Copyright (c) 2021 Linaro Limited + * + * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. + * See the COPYING file in the top-level directory. + */ + +#ifndef PPC_HOST_SIGNAL_H +#define PPC_HOST_SIGNAL_H + +static inline uintptr_t host_signal_pc(ucontext_t *uc) +{ + return uc->uc_mcontext.regs->nip; +} + +static inline bool host_signal_write(siginfo_t *info, ucontext_t *uc) +{ + return uc->uc_mcontext.regs->trap != 0x400 + && (uc->uc_mcontext.regs->dsisr & 0x02000000); +} + +#endif diff --git a/linux-user/host/ppc64/host-signal.h b/linux-user/host/ppc64/host-signal.h new file mode 100644 index 0000000000..a353c22a90 --- /dev/null +++ b/linux-user/host/ppc64/host-signal.h @@ -0,0 +1 @@ +#include "../ppc/host-signal.h" diff --git a/linux-user/host/riscv/host-signal.h b/linux-user/host/riscv/host-signal.h new file mode 100644 index 0000000000..3b168cb58b --- /dev/null +++ b/linux-user/host/riscv/host-signal.h @@ -0,0 +1,58 @@ +/* + * host-signal.h: signal info dependent on the host architecture + * + * Copyright (c) 2003-2005 Fabrice Bellard + * Copyright (c) 2021 Linaro Limited + * + * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. + * See the COPYING file in the top-level directory. + */ + +#ifndef RISCV_HOST_SIGNAL_H +#define RISCV_HOST_SIGNAL_H + +static inline uintptr_t host_signal_pc(ucontext_t *uc) +{ + return uc->uc_mcontext.__gregs[REG_PC]; +} + +static inline bool host_signal_write(siginfo_t *info, ucontext_t *uc) +{ + /* + * Detect store by reading the instruction at the program counter. + * Do not read more than 16 bits, because we have not yet determined + * the size of the instruction.
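+ *
+ * Editor's illustration (not part of the original patch): in the RISC-V
+ * encoding an instruction is 16 bits long unless its two lowest opcode
+ * bits are both set, so the first halfword is enough both to classify
+ * the length and to match every compressed store below.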
+ */ + const uint16_t *pinsn = (const uint16_t *)host_signal_pc(uc); + uint16_t insn = pinsn[0]; + + /* 16-bit instructions */ + switch (insn & 0xe003) { + case 0xa000: /* c.fsd */ + case 0xc000: /* c.sw */ + case 0xe000: /* c.sd (rv64) / c.fsw (rv32) */ + case 0xa002: /* c.fsdsp */ + case 0xc002: /* c.swsp */ + case 0xe002: /* c.sdsp (rv64) / c.fswsp (rv32) */ + return true; + } + + /* 32-bit instructions, major opcodes */ + switch (insn & 0x7f) { + case 0x23: /* store */ + case 0x27: /* store-fp */ + return true; + case 0x2f: /* amo */ + /* + * The AMO function code is in bits 25-31, unread as yet. + * The AMO functions are LR (read), SC (write), and the + * rest are all read-modify-write. + */ + insn = pinsn[1]; + return (insn >> 11) != 2; /* LR */ + } + + return false; +} + +#endif diff --git a/linux-user/host/riscv64/hostdep.h b/linux-user/host/riscv/hostdep.h similarity index 94% rename from linux-user/host/riscv64/hostdep.h rename to linux-user/host/riscv/hostdep.h index 865f0fb9ff..2ba07456ae 100644 --- a/linux-user/host/riscv64/hostdep.h +++ b/linux-user/host/riscv/hostdep.h @@ -5,8 +5,8 @@ * See the COPYING file in the top-level directory. */ -#ifndef RISCV64_HOSTDEP_H -#define RISCV64_HOSTDEP_H +#ifndef RISCV_HOSTDEP_H +#define RISCV_HOSTDEP_H /* We have a safe-syscall.inc.S */ #define HAVE_SAFE_SYSCALL diff --git a/linux-user/host/riscv64/safe-syscall.inc.S b/linux-user/host/riscv/safe-syscall.inc.S similarity index 100% rename from linux-user/host/riscv64/safe-syscall.inc.S rename to linux-user/host/riscv/safe-syscall.inc.S diff --git a/linux-user/host/riscv32/hostdep.h b/linux-user/host/riscv32/hostdep.h deleted file mode 100644 index adf9edbf2d..0000000000 --- a/linux-user/host/riscv32/hostdep.h +++ /dev/null @@ -1,11 +0,0 @@ -/* - * hostdep.h : things which are dependent on the host architecture - * - * This work is licensed under the terms of the GNU GPL, version 2 or later. - * See the COPYING file in the top-level directory. - */ - -#ifndef RISCV32_HOSTDEP_H -#define RISCV32_HOSTDEP_H - -#endif diff --git a/linux-user/host/s390/host-signal.h b/linux-user/host/s390/host-signal.h new file mode 100644 index 0000000000..26990e4893 --- /dev/null +++ b/linux-user/host/s390/host-signal.h @@ -0,0 +1,93 @@ +/* + * host-signal.h: signal info dependent on the host architecture + * + * Copyright (c) 2003-2005 Fabrice Bellard + * Copyright (c) 2021 Linaro Limited + * + * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. + * See the COPYING file in the top-level directory. + */ + +#ifndef S390_HOST_SIGNAL_H +#define S390_HOST_SIGNAL_H + +static inline uintptr_t host_signal_pc(ucontext_t *uc) +{ + return uc->uc_mcontext.psw.addr; +} + +static inline bool host_signal_write(siginfo_t *info, ucontext_t *uc) +{ + uint16_t *pinsn = (uint16_t *)host_signal_pc(uc); + + /* + * ??? On linux, the non-rt signal handler has 4 (!) arguments instead + * of the normal 2 arguments. The 4th argument contains the "Translation- + * Exception Identification for DAT Exceptions" from the hardware (aka + * "int_parm_long"), which does in fact contain the is_write value. + * The rt signal handler, as far as I can tell, does not give this value + * at all. Not that we could get to it from here even if it were. + * So fall back to parsing instructions. Treat read-modify-write ones as + * writes, which is not fully correct, but for tracking self-modifying code + * this is better than treating them as reads. 
Checking si_addr page flags + * might be a viable improvement, albeit a racy one. + */ + /* ??? This is not even close to complete. */ + switch (pinsn[0] >> 8) { + case 0x50: /* ST */ + case 0x42: /* STC */ + case 0x40: /* STH */ + case 0xba: /* CS */ + case 0xbb: /* CDS */ + return true; + case 0xc4: /* RIL format insns */ + switch (pinsn[0] & 0xf) { + case 0xf: /* STRL */ + case 0xb: /* STGRL */ + case 0x7: /* STHRL */ + return true; + } + break; + case 0xc8: /* SSF format insns */ + switch (pinsn[0] & 0xf) { + case 0x2: /* CSST */ + return true; + } + break; + case 0xe3: /* RXY format insns */ + switch (pinsn[2] & 0xff) { + case 0x50: /* STY */ + case 0x24: /* STG */ + case 0x72: /* STCY */ + case 0x70: /* STHY */ + case 0x8e: /* STPQ */ + case 0x3f: /* STRVH */ + case 0x3e: /* STRV */ + case 0x2f: /* STRVG */ + return true; + } + break; + case 0xeb: /* RSY format insns */ + switch (pinsn[2] & 0xff) { + case 0x14: /* CSY */ + case 0x30: /* CSG */ + case 0x31: /* CDSY */ + case 0x3e: /* CDSG */ + case 0xe4: /* LANG */ + case 0xe6: /* LAOG */ + case 0xe7: /* LAXG */ + case 0xe8: /* LAAG */ + case 0xea: /* LAALG */ + case 0xf4: /* LAN */ + case 0xf6: /* LAO */ + case 0xf7: /* LAX */ + case 0xfa: /* LAAL */ + case 0xf8: /* LAA */ + return true; + } + break; + } + return false; +} + +#endif diff --git a/linux-user/host/s390x/host-signal.h b/linux-user/host/s390x/host-signal.h new file mode 100644 index 0000000000..0e83f9358d --- /dev/null +++ b/linux-user/host/s390x/host-signal.h @@ -0,0 +1 @@ +#include "../s390/host-signal.h" diff --git a/linux-user/host/sparc/host-signal.h b/linux-user/host/sparc/host-signal.h new file mode 100644 index 0000000000..5e71d33f8e --- /dev/null +++ b/linux-user/host/sparc/host-signal.h @@ -0,0 +1,54 @@ +/* + * host-signal.h: signal info dependent on the host architecture + * + * Copyright (c) 2003-2005 Fabrice Bellard + * Copyright (c) 2021 Linaro Limited + * + * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. + * See the COPYING file in the top-level directory. 
+ */ + +#ifndef SPARC_HOST_SIGNAL_H +#define SPARC_HOST_SIGNAL_H + +static inline uintptr_t host_signal_pc(ucontext_t *uc) +{ +#ifdef __arch64__ + return uc->uc_mcontext.mc_gregs[MC_PC]; +#else + return uc->uc_mcontext.gregs[REG_PC]; +#endif +} + +static inline bool host_signal_write(siginfo_t *info, ucontext_t *uc) +{ + uint32_t insn = *(uint32_t *)host_signal_pc(uc); + + if ((insn >> 30) == 3) { + switch ((insn >> 19) & 0x3f) { + case 0x05: /* stb */ + case 0x15: /* stba */ + case 0x06: /* sth */ + case 0x16: /* stha */ + case 0x04: /* st */ + case 0x14: /* sta */ + case 0x07: /* std */ + case 0x17: /* stda */ + case 0x0e: /* stx */ + case 0x1e: /* stxa */ + case 0x24: /* stf */ + case 0x34: /* stfa */ + case 0x27: /* stdf */ + case 0x37: /* stdfa */ + case 0x26: /* stqf */ + case 0x36: /* stqfa */ + case 0x25: /* stfsr */ + case 0x3c: /* casa */ + case 0x3e: /* casxa */ + return true; + } + } + return false; +} + +#endif diff --git a/linux-user/host/sparc64/host-signal.h b/linux-user/host/sparc64/host-signal.h new file mode 100644 index 0000000000..1191fe2d40 --- /dev/null +++ b/linux-user/host/sparc64/host-signal.h @@ -0,0 +1 @@ +#include "../sparc/host-signal.h" diff --git a/linux-user/host/x32/host-signal.h b/linux-user/host/x32/host-signal.h new file mode 100644 index 0000000000..26800591d3 --- /dev/null +++ b/linux-user/host/x32/host-signal.h @@ -0,0 +1 @@ +#include "../x86_64/host-signal.h" diff --git a/linux-user/host/x86_64/host-signal.h b/linux-user/host/x86_64/host-signal.h new file mode 100644 index 0000000000..883d2fcf65 --- /dev/null +++ b/linux-user/host/x86_64/host-signal.h @@ -0,0 +1,24 @@ +/* + * host-signal.h: signal info dependent on the host architecture + * + * Copyright (C) 2021 Linaro Limited + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
+ */ + +#ifndef X86_64_HOST_SIGNAL_H +#define X86_64_HOST_SIGNAL_H + +static inline uintptr_t host_signal_pc(ucontext_t *uc) +{ + return uc->uc_mcontext.gregs[REG_RIP]; +} + +static inline bool host_signal_write(siginfo_t *info, ucontext_t *uc) +{ + return uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe + && (uc->uc_mcontext.gregs[REG_ERR] & 0x2); +} + +#endif diff --git a/linux-user/hppa/cpu_loop.c b/linux-user/hppa/cpu_loop.c index 81607a9b27..375576c8f0 100644 --- a/linux-user/hppa/cpu_loop.c +++ b/linux-user/hppa/cpu_loop.c @@ -144,29 +144,6 @@ void cpu_loop(CPUHPPAState *env) env->iaoq_f = env->gr[31]; env->iaoq_b = env->gr[31] + 4; break; - case EXCP_ITLB_MISS: - case EXCP_DTLB_MISS: - case EXCP_NA_ITLB_MISS: - case EXCP_NA_DTLB_MISS: - case EXCP_IMP: - case EXCP_DMP: - case EXCP_DMB: - case EXCP_PAGE_REF: - case EXCP_DMAR: - case EXCP_DMPI: - info.si_signo = TARGET_SIGSEGV; - info.si_errno = 0; - info.si_code = TARGET_SEGV_ACCERR; - info._sifields._sigfault._addr = env->cr[CR_IOR]; - queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info); - break; - case EXCP_UNALIGN: - info.si_signo = TARGET_SIGBUS; - info.si_errno = 0; - info.si_code = 0; - info._sifields._sigfault._addr = env->cr[CR_IOR]; - queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info); - break; case EXCP_ILL: case EXCP_PRIV_OPR: case EXCP_PRIV_REG: diff --git a/linux-user/hppa/target_signal.h b/linux-user/hppa/target_signal.h index 7f525362e9..d558119ee7 100644 --- a/linux-user/hppa/target_signal.h +++ b/linux-user/hppa/target_signal.h @@ -71,4 +71,18 @@ typedef struct target_sigaltstack { /* mask for all SS_xxx flags */ #define TARGET_SS_FLAG_BITS TARGET_SS_AUTODISARM +/* + * We cannot use a bare sigtramp page for hppa-linux. + * + * Unlike other guests where we use the instructions at PC to validate + * an offset from SP, the hppa libgcc signal frame fallback unwinding uses + * the PC address itself to find the frame. This is due to the fact that + * the hppa grows the stack upward, and the frame is of unknown size. + * + * TODO: We should be able to use a VDSO to address this, by providing + * proper unwind info for the sigtramp code, at which point the fallback + * unwinder will not be used. 
+ */ +#define TARGET_ARCH_HAS_SIGTRAMP_PAGE 0 + #endif /* HPPA_TARGET_SIGNAL_H */ diff --git a/linux-user/i386/signal.c b/linux-user/i386/signal.c index 3b4b55fc0a..433efa3d69 100644 --- a/linux-user/i386/signal.c +++ b/linux-user/i386/signal.c @@ -310,6 +310,22 @@ get_sigframe(struct target_sigaction *ka, CPUX86State *env, size_t frame_size) } #ifndef TARGET_X86_64 +static void install_sigtramp(void *tramp) +{ + /* This is popl %eax ; movl $syscall,%eax ; int $0x80 */ + __put_user(0xb858, (uint16_t *)(tramp + 0)); + __put_user(TARGET_NR_sigreturn, (int32_t *)(tramp + 2)); + __put_user(0x80cd, (uint16_t *)(tramp + 6)); +} + +static void install_rt_sigtramp(void *tramp) +{ + /* This is movl $syscall,%eax ; int $0x80 */ + __put_user(0xb8, (uint8_t *)(tramp + 0)); + __put_user(TARGET_NR_rt_sigreturn, (int32_t *)(tramp + 1)); + __put_user(0x80cd, (uint16_t *)(tramp + 5)); +} + /* compare linux/arch/i386/kernel/signal.c:setup_frame() */ void setup_frame(int sig, struct target_sigaction *ka, target_sigset_t *set, CPUX86State *env) @@ -338,16 +354,9 @@ void setup_frame(int sig, struct target_sigaction *ka, if (ka->sa_flags & TARGET_SA_RESTORER) { __put_user(ka->sa_restorer, &frame->pretcode); } else { - uint16_t val16; - abi_ulong retcode_addr; - retcode_addr = frame_addr + offsetof(struct sigframe, retcode); - __put_user(retcode_addr, &frame->pretcode); - /* This is popl %eax ; movl $,%eax ; int $0x80 */ - val16 = 0xb858; - __put_user(val16, (uint16_t *)(frame->retcode+0)); - __put_user(TARGET_NR_sigreturn, (int *)(frame->retcode+2)); - val16 = 0x80cd; - __put_user(val16, (uint16_t *)(frame->retcode+6)); + /* This is no longer used, but is retained for ABI compatibility. */ + install_sigtramp(frame->retcode); + __put_user(default_sigreturn, &frame->pretcode); } /* Set up registers for signal handler */ @@ -412,24 +421,18 @@ void setup_rt_frame(int sig, struct target_sigaction *ka, /* Set up to return from userspace. If provided, use a stub already in userspace. */ -#ifndef TARGET_X86_64 if (ka->sa_flags & TARGET_SA_RESTORER) { __put_user(ka->sa_restorer, &frame->pretcode); } else { - uint16_t val16; - addr = frame_addr + offsetof(struct rt_sigframe, retcode); - __put_user(addr, &frame->pretcode); - /* This is movl $,%eax ; int $0x80 */ - __put_user(0xb8, (char *)(frame->retcode+0)); - __put_user(TARGET_NR_rt_sigreturn, (int *)(frame->retcode+1)); - val16 = 0x80cd; - __put_user(val16, (uint16_t *)(frame->retcode+5)); - } +#ifdef TARGET_X86_64 + /* For x86_64, SA_RESTORER is required ABI. */ + goto give_sigsegv; #else - /* XXX: Would be slightly better to return -EFAULT here if test fails - assert(ka->sa_flags & TARGET_SA_RESTORER); */ - __put_user(ka->sa_restorer, &frame->pretcode); + /* This is no longer used, but is retained for ABI compatibility. 
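+ * Editor's illustration (not part of the original patch): the bytes that
+ * install_rt_sigtramp() keeps writing here are b8 <nr> cd 80, i.e. the
+ * "movl $__NR_rt_sigreturn, %eax; int $0x80" pair the pre-patch code
+ * emitted, while pretcode is now pointed at default_rt_sigreturn on the
+ * shared sigtramp page.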
*/ + install_rt_sigtramp(frame->retcode); + __put_user(default_rt_sigreturn, &frame->pretcode); #endif + } /* Set up registers for signal handler */ env->regs[R_ESP] = frame_addr; @@ -592,3 +595,19 @@ badframe: force_sig(TARGET_SIGSEGV); return -TARGET_QEMU_ESIGRETURN; } + +#ifndef TARGET_X86_64 +void setup_sigtramp(abi_ulong sigtramp_page) +{ + uint16_t *tramp = lock_user(VERIFY_WRITE, sigtramp_page, 2 * 8, 0); + assert(tramp != NULL); + + default_sigreturn = sigtramp_page; + install_sigtramp(tramp); + + default_rt_sigreturn = sigtramp_page + 8; + install_rt_sigtramp(tramp + 8); + + unlock_user(tramp, sigtramp_page, 2 * 8); +} +#endif diff --git a/linux-user/i386/target_signal.h b/linux-user/i386/target_signal.h index 50361af874..64d09f2e75 100644 --- a/linux-user/i386/target_signal.h +++ b/linux-user/i386/target_signal.h @@ -22,4 +22,6 @@ typedef struct target_sigaltstack { #include "../generic/signal.h" #define TARGET_ARCH_HAS_SETUP_FRAME +#define TARGET_ARCH_HAS_SIGTRAMP_PAGE 1 + #endif /* I386_TARGET_SIGNAL_H */ diff --git a/linux-user/m68k/cpu_loop.c b/linux-user/m68k/cpu_loop.c index ebf32be78f..790bd558c3 100644 --- a/linux-user/m68k/cpu_loop.c +++ b/linux-user/m68k/cpu_loop.c @@ -90,16 +90,6 @@ void cpu_loop(CPUM68KState *env) case EXCP_INTERRUPT: /* just indicate that signals should be handled asap */ break; - case EXCP_ACCESS: - { - info.si_signo = TARGET_SIGSEGV; - info.si_errno = 0; - /* XXX: check env->error_code */ - info.si_code = TARGET_SEGV_MAPERR; - info._sifields._sigfault._addr = env->mmu.ar; - queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info); - } - break; case EXCP_DEBUG: info.si_signo = TARGET_SIGTRAP; info.si_errno = 0; diff --git a/linux-user/m68k/signal.c b/linux-user/m68k/signal.c index 4f8eb6f727..ec33482e14 100644 --- a/linux-user/m68k/signal.c +++ b/linux-user/m68k/signal.c @@ -39,7 +39,6 @@ struct target_sigframe int sig; int code; abi_ulong psc; - char retcode[8]; abi_ulong extramask[TARGET_NSIG_WORDS-1]; struct target_sigcontext sc; }; @@ -76,7 +75,6 @@ struct target_rt_sigframe int sig; abi_ulong pinfo; abi_ulong puc; - char retcode[8]; struct target_siginfo info; struct target_ucontext uc; }; @@ -130,7 +128,6 @@ void setup_frame(int sig, struct target_sigaction *ka, { struct target_sigframe *frame; abi_ulong frame_addr; - abi_ulong retcode_addr; abi_ulong sc_addr; int i; @@ -152,16 +149,7 @@ void setup_frame(int sig, struct target_sigaction *ka, } /* Set up to return from userspace. */ - - retcode_addr = frame_addr + offsetof(struct target_sigframe, retcode); - __put_user(retcode_addr, &frame->pretcode); - - /* moveq #,d0; trap #0 */ - - __put_user(0x70004e40 + (TARGET_NR_sigreturn << 16), - (uint32_t *)(frame->retcode)); - - /* Set up to return from userspace */ + __put_user(default_sigreturn, &frame->pretcode); env->aregs[7] = frame_addr; env->pc = ka->_sa_handler; @@ -288,7 +276,6 @@ void setup_rt_frame(int sig, struct target_sigaction *ka, { struct target_rt_sigframe *frame; abi_ulong frame_addr; - abi_ulong retcode_addr; abi_ulong info_addr; abi_ulong uc_addr; int err = 0; @@ -325,17 +312,7 @@ void setup_rt_frame(int sig, struct target_sigaction *ka, } /* Set up to return from userspace. 
*/ - - retcode_addr = frame_addr + offsetof(struct target_sigframe, retcode); - __put_user(retcode_addr, &frame->pretcode); - - /* moveq #,d0; notb d0; trap #0 */ - - __put_user(0x70004600 + ((TARGET_NR_rt_sigreturn ^ 0xff) << 16), - (uint32_t *)(frame->retcode + 0)); - __put_user(0x4e40, (uint16_t *)(frame->retcode + 4)); - - /* Set up to return from userspace */ + __put_user(default_rt_sigreturn, &frame->pretcode); env->aregs[7] = frame_addr; env->pc = ka->_sa_handler; @@ -411,3 +388,23 @@ badframe: force_sig(TARGET_SIGSEGV); return -TARGET_QEMU_ESIGRETURN; } + +void setup_sigtramp(abi_ulong sigtramp_page) +{ + void *tramp = lock_user(VERIFY_WRITE, sigtramp_page, 4 + 6, 0); + assert(tramp != NULL); + + default_sigreturn = sigtramp_page; + + /* moveq #,d0; trap #0 */ + __put_user(0x70004e40 + (TARGET_NR_sigreturn << 16), (uint32_t *)tramp); + + default_rt_sigreturn = sigtramp_page + 4; + + /* moveq #,d0; notb d0; trap #0 */ + __put_user(0x70004600 + ((TARGET_NR_rt_sigreturn ^ 0xff) << 16), + (uint32_t *)(tramp + 4)); + __put_user(0x4e40, (uint16_t *)(tramp + 8)); + + unlock_user(tramp, sigtramp_page, 4 + 6); +} diff --git a/linux-user/m68k/target_signal.h b/linux-user/m68k/target_signal.h index d096544ef8..94157bf1f4 100644 --- a/linux-user/m68k/target_signal.h +++ b/linux-user/m68k/target_signal.h @@ -22,4 +22,6 @@ typedef struct target_sigaltstack { #include "../generic/signal.h" #define TARGET_ARCH_HAS_SETUP_FRAME +#define TARGET_ARCH_HAS_SIGTRAMP_PAGE 1 + #endif /* M68K_TARGET_SIGNAL_H */ diff --git a/linux-user/meson.build b/linux-user/meson.build index 9549f81682..bf62c13e37 100644 --- a/linux-user/meson.build +++ b/linux-user/meson.build @@ -1,3 +1,7 @@ +if not have_linux_user + subdir_done() +endif + linux_user_ss.add(files( 'elfload.c', 'exit.c', diff --git a/linux-user/microblaze/cpu_loop.c b/linux-user/microblaze/cpu_loop.c index 52222eb93f..a94467dd2d 100644 --- a/linux-user/microblaze/cpu_loop.c +++ b/linux-user/microblaze/cpu_loop.c @@ -37,16 +37,6 @@ void cpu_loop(CPUMBState *env) process_queued_cpu_work(cs); switch (trapnr) { - case 0xaa: - { - info.si_signo = TARGET_SIGSEGV; - info.si_errno = 0; - /* XXX: check env->error_code */ - info.si_code = TARGET_SEGV_MAPERR; - info._sifields._sigfault._addr = 0; - queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info); - } - break; case EXCP_INTERRUPT: /* just indicate that signals should be handled asap */ break; diff --git a/linux-user/microblaze/signal.c b/linux-user/microblaze/signal.c index b822679d18..8ebb6a1b7d 100644 --- a/linux-user/microblaze/signal.c +++ b/linux-user/microblaze/signal.c @@ -161,17 +161,11 @@ void setup_rt_frame(int sig, struct target_sigaction *ka, /* Kernel does not use SA_RESTORER. */ - /* addi r12, r0, __NR_sigreturn */ - __put_user(0x31800000U | TARGET_NR_rt_sigreturn, frame->tramp + 0); - /* brki r14, 0x8 */ - __put_user(0xb9cc0008U, frame->tramp + 1); - /* * Return from sighandler will jump to the tramp. 
* Negative 8 offset because return is rtsd r15, 8 */ - env->regs[15] = - frame_addr + offsetof(struct target_rt_sigframe, tramp) - 8; + env->regs[15] = default_rt_sigreturn - 8; /* Set up registers for signal handler */ env->regs[1] = frame_addr; @@ -220,3 +214,19 @@ long do_rt_sigreturn(CPUMBState *env) force_sig(TARGET_SIGSEGV); return -TARGET_QEMU_ESIGRETURN; } + +void setup_sigtramp(abi_ulong sigtramp_page) +{ + uint32_t *tramp = lock_user(VERIFY_WRITE, sigtramp_page, 8, 0); + assert(tramp != NULL); + + /* + * addi r12, r0, __NR_rt_sigreturn + * brki r14, 0x8 + */ + __put_user(0x31800000U | TARGET_NR_rt_sigreturn, tramp); + __put_user(0xb9cc0008U, tramp + 1); + + default_rt_sigreturn = sigtramp_page; + unlock_user(tramp, sigtramp_page, 8); +} diff --git a/linux-user/microblaze/target_signal.h b/linux-user/microblaze/target_signal.h index 1c326296de..e8b510f6b1 100644 --- a/linux-user/microblaze/target_signal.h +++ b/linux-user/microblaze/target_signal.h @@ -21,4 +21,6 @@ typedef struct target_sigaltstack { #include "../generic/signal.h" +#define TARGET_ARCH_HAS_SIGTRAMP_PAGE 1 + #endif /* MICROBLAZE_TARGET_SIGNAL_H */ diff --git a/linux-user/mips/cpu_loop.c b/linux-user/mips/cpu_loop.c index cb03fb066b..b735c99a24 100644 --- a/linux-user/mips/cpu_loop.c +++ b/linux-user/mips/cpu_loop.c @@ -158,17 +158,6 @@ done_syscall: } env->active_tc.gpr[2] = ret; break; - case EXCP_TLBL: - case EXCP_TLBS: - case EXCP_AdEL: - case EXCP_AdES: - info.si_signo = TARGET_SIGSEGV; - info.si_errno = 0; - /* XXX: check env->error_code */ - info.si_code = TARGET_SEGV_MAPERR; - info._sifields._sigfault._addr = env->CP0_BadVAddr; - queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info); - break; case EXCP_CpU: case EXCP_RI: info.si_signo = TARGET_SIGILL; diff --git a/linux-user/mips/signal.c b/linux-user/mips/signal.c index d174b3453c..8f79e405ec 100644 --- a/linux-user/mips/signal.c +++ b/linux-user/mips/signal.c @@ -87,10 +87,8 @@ struct target_rt_sigframe { }; /* Install trampoline to jump back from signal handler */ -static inline int install_sigtramp(unsigned int *tramp, unsigned int syscall) +static void install_sigtramp(uint32_t *tramp, unsigned int syscall) { - int err = 0; - /* * Set up the return code ... 
* @@ -100,7 +98,6 @@ static inline int install_sigtramp(unsigned int *tramp, unsigned int syscall) __put_user(0x24020000 + syscall, tramp + 0); __put_user(0x0000000c , tramp + 1); - return err; } static inline void setup_sigcontext(CPUMIPSState *regs, @@ -212,8 +209,6 @@ void setup_frame(int sig, struct target_sigaction * ka, goto give_sigsegv; } - install_sigtramp(frame->sf_code, TARGET_NR_sigreturn); - setup_sigcontext(regs, &frame->sf_sc); for(i = 0; i < TARGET_NSIG_WORDS; i++) { @@ -234,7 +229,7 @@ void setup_frame(int sig, struct target_sigaction * ka, regs->active_tc.gpr[ 5] = 0; regs->active_tc.gpr[ 6] = frame_addr + offsetof(struct sigframe, sf_sc); regs->active_tc.gpr[29] = frame_addr; - regs->active_tc.gpr[31] = frame_addr + offsetof(struct sigframe, sf_code); + regs->active_tc.gpr[31] = default_sigreturn; /* The original kernel code sets CP0_EPC to the handler * since it returns to userland using eret * we cannot do this here, and we must set PC directly */ @@ -308,8 +303,6 @@ void setup_rt_frame(int sig, struct target_sigaction *ka, goto give_sigsegv; } - install_sigtramp(frame->rs_code, TARGET_NR_rt_sigreturn); - tswap_siginfo(&frame->rs_info, info); __put_user(0, &frame->rs_uc.tuc_flags); @@ -338,11 +331,13 @@ void setup_rt_frame(int sig, struct target_sigaction *ka, env->active_tc.gpr[ 6] = frame_addr + offsetof(struct target_rt_sigframe, rs_uc); env->active_tc.gpr[29] = frame_addr; - env->active_tc.gpr[31] = frame_addr - + offsetof(struct target_rt_sigframe, rs_code); - /* The original kernel code sets CP0_EPC to the handler - * since it returns to userland using eret - * we cannot do this here, and we must set PC directly */ + env->active_tc.gpr[31] = default_rt_sigreturn; + + /* + * The original kernel code sets CP0_EPC to the handler + * since it returns to userland using eret + * we cannot do this here, and we must set PC directly + */ env->active_tc.PC = env->active_tc.gpr[25] = ka->_sa_handler; mips_set_hflags_isa_mode_from_pc(env); unlock_user_struct(frame, frame_addr, 1); @@ -382,3 +377,19 @@ badframe: force_sig(TARGET_SIGSEGV); return -TARGET_QEMU_ESIGRETURN; } + +void setup_sigtramp(abi_ulong sigtramp_page) +{ + uint32_t *tramp = lock_user(VERIFY_WRITE, sigtramp_page, 2 * 8, 0); + assert(tramp != NULL); + +#ifdef TARGET_ARCH_HAS_SETUP_FRAME + default_sigreturn = sigtramp_page; + install_sigtramp(tramp, TARGET_NR_sigreturn); +#endif + + default_rt_sigreturn = sigtramp_page + 8; + install_sigtramp(tramp + 2, TARGET_NR_rt_sigreturn); + + unlock_user(tramp, sigtramp_page, 2 * 8); +} diff --git a/linux-user/mips/target_signal.h b/linux-user/mips/target_signal.h index d521765f6b..780a4ddf29 100644 --- a/linux-user/mips/target_signal.h +++ b/linux-user/mips/target_signal.h @@ -73,6 +73,7 @@ typedef struct target_sigaltstack { /* compare linux/arch/mips/kernel/signal.c:setup_frame() */ #define TARGET_ARCH_HAS_SETUP_FRAME #endif +#define TARGET_ARCH_HAS_SIGTRAMP_PAGE 1 /* bit-flags */ #define TARGET_SS_AUTODISARM (1U << 31) /* disable sas during sighandling */ diff --git a/linux-user/mips64/target_signal.h b/linux-user/mips64/target_signal.h index d857c55e4c..275e9b7f9a 100644 --- a/linux-user/mips64/target_signal.h +++ b/linux-user/mips64/target_signal.h @@ -76,4 +76,6 @@ typedef struct target_sigaltstack { /* compare linux/arch/mips/kernel/signal.c:setup_frame() */ #define TARGET_ARCH_HAS_SETUP_FRAME #endif +#define TARGET_ARCH_HAS_SIGTRAMP_PAGE 1 + #endif /* MIPS64_TARGET_SIGNAL_H */ diff --git a/linux-user/nios2/target_signal.h b/linux-user/nios2/target_signal.h index 
aebf749f12..fe266c4c51 100644 --- a/linux-user/nios2/target_signal.h +++ b/linux-user/nios2/target_signal.h @@ -19,4 +19,7 @@ typedef struct target_sigaltstack { #include "../generic/signal.h" +/* Nios2 uses a fixed address on the kuser page for sigreturn. */ +#define TARGET_ARCH_HAS_SIGTRAMP_PAGE 0 + #endif /* NIOS2_TARGET_SIGNAL_H */ diff --git a/linux-user/openrisc/cpu_loop.c b/linux-user/openrisc/cpu_loop.c index f6360db47c..3cfdbbf037 100644 --- a/linux-user/openrisc/cpu_loop.c +++ b/linux-user/openrisc/cpu_loop.c @@ -54,15 +54,6 @@ void cpu_loop(CPUOpenRISCState *env) cpu_set_gpr(env, 11, ret); } break; - case EXCP_DPF: - case EXCP_IPF: - case EXCP_RANGE: - info.si_signo = TARGET_SIGSEGV; - info.si_errno = 0; - info.si_code = TARGET_SEGV_MAPERR; - info._sifields._sigfault._addr = env->pc; - queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info); - break; case EXCP_ALIGN: info.si_signo = TARGET_SIGBUS; info.si_errno = 0; @@ -77,13 +68,6 @@ void cpu_loop(CPUOpenRISCState *env) info._sifields._sigfault._addr = env->pc; queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info); break; - case EXCP_FPE: - info.si_signo = TARGET_SIGFPE; - info.si_errno = 0; - info.si_code = 0; - info._sifields._sigfault._addr = env->pc; - queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info); - break; case EXCP_INTERRUPT: /* We processed the pending cpu work above. */ break; @@ -96,6 +80,15 @@ void cpu_loop(CPUOpenRISCState *env) case EXCP_ATOMIC: cpu_exec_step_atomic(cs); break; + case EXCP_RANGE: + /* Requires SR.OVE set, which linux-user won't do. */ + cpu_abort(cs, "Unexpected RANGE exception"); + case EXCP_FPE: + /* + * Requires FPSCR.FPEE set. Writes to FPSCR from usermode not + * yet enabled in kernel ABI, so linux-user does not either. + */ + cpu_abort(cs, "Unexpected FPE exception"); default: g_assert_not_reached(); } diff --git a/linux-user/openrisc/signal.c b/linux-user/openrisc/signal.c index ca2532bf50..be8b68784a 100644 --- a/linux-user/openrisc/signal.c +++ b/linux-user/openrisc/signal.c @@ -38,7 +38,6 @@ typedef struct target_ucontext { typedef struct target_rt_sigframe { struct target_siginfo info; target_ucontext uc; - uint32_t retcode[4]; /* trampoline code */ } target_rt_sigframe; static void restore_sigcontext(CPUOpenRISCState *env, target_sigcontext *sc) @@ -116,14 +115,8 @@ void setup_rt_frame(int sig, struct target_sigaction *ka, __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]); } - /* This is l.ori r11,r0,__NR_sigreturn; l.sys 1; l.nop; l.nop */ - __put_user(0xa9600000 | TARGET_NR_rt_sigreturn, frame->retcode + 0); - __put_user(0x20000001, frame->retcode + 1); - __put_user(0x15000000, frame->retcode + 2); - __put_user(0x15000000, frame->retcode + 3); - /* Set up registers for signal handler */ - cpu_set_gpr(env, 9, frame_addr + offsetof(target_rt_sigframe, retcode)); + cpu_set_gpr(env, 9, default_rt_sigreturn); cpu_set_gpr(env, 3, sig); cpu_set_gpr(env, 4, frame_addr + offsetof(target_rt_sigframe, info)); cpu_set_gpr(env, 5, frame_addr + offsetof(target_rt_sigframe, uc)); @@ -169,3 +162,16 @@ long do_rt_sigreturn(CPUOpenRISCState *env) force_sig(TARGET_SIGSEGV); return 0; } + +void setup_sigtramp(abi_ulong sigtramp_page) +{ + uint32_t *tramp = lock_user(VERIFY_WRITE, sigtramp_page, 8, 0); + assert(tramp != NULL); + + /* This is l.ori r11,r0,__NR_sigreturn; l.sys 1 */ + __put_user(0xa9600000 | TARGET_NR_rt_sigreturn, tramp + 0); + __put_user(0x20000001, tramp + 1); + + default_rt_sigreturn = sigtramp_page; + unlock_user(tramp, sigtramp_page, 8); +} diff --git 
a/linux-user/openrisc/target_signal.h b/linux-user/openrisc/target_signal.h index 8283eaf544..077ec3d5e8 100644 --- a/linux-user/openrisc/target_signal.h +++ b/linux-user/openrisc/target_signal.h @@ -26,4 +26,6 @@ typedef struct target_sigaltstack { #include "../generic/signal.h" +#define TARGET_ARCH_HAS_SIGTRAMP_PAGE 1 + #endif /* OPENRISC_TARGET_SIGNAL_H */ diff --git a/linux-user/ppc/cpu_loop.c b/linux-user/ppc/cpu_loop.c index 840b23736b..483e669300 100644 --- a/linux-user/ppc/cpu_loop.c +++ b/linux-user/ppc/cpu_loop.c @@ -162,14 +162,6 @@ void cpu_loop(CPUPPCState *env) cpu_abort(cs, "External interrupt while in user mode. " "Aborting\n"); break; - case POWERPC_EXCP_ALIGN: /* Alignment exception */ - /* XXX: check this */ - info.si_signo = TARGET_SIGBUS; - info.si_errno = 0; - info.si_code = TARGET_BUS_ADRALN; - info._sifields._sigfault._addr = env->nip; - queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info); - break; case POWERPC_EXCP_PROGRAM: /* Program exception */ case POWERPC_EXCP_HV_EMU: /* HV emulation */ /* XXX: check this */ diff --git a/linux-user/ppc/signal.c b/linux-user/ppc/signal.c index e4d0dfa3bf..90a0369632 100644 --- a/linux-user/ppc/signal.c +++ b/linux-user/ppc/signal.c @@ -203,9 +203,6 @@ struct target_func_ptr { #endif -/* We use the mc_pad field for the signal return trampoline. */ -#define tramp mc_pad - /* See arch/powerpc/kernel/signal.c. */ static target_ulong get_sigframe(struct target_sigaction *ka, CPUPPCState *env, @@ -245,7 +242,7 @@ static void save_user_regs(CPUPPCState *env, struct target_mcontext *frame) __put_user(env->nip, &frame->mc_gregs[TARGET_PT_NIP]); __put_user(env->ctr, &frame->mc_gregs[TARGET_PT_CTR]); __put_user(env->lr, &frame->mc_gregs[TARGET_PT_LNK]); - __put_user(env->xer, &frame->mc_gregs[TARGET_PT_XER]); + __put_user(cpu_read_xer(env), &frame->mc_gregs[TARGET_PT_XER]); for (i = 0; i < ARRAY_SIZE(env->crf); i++) { ccr |= env->crf[i] << (32 - ((i + 1) * 4)); @@ -309,10 +306,8 @@ static void save_user_regs(CPUPPCState *env, struct target_mcontext *frame) static void encode_trampoline(int sigret, uint32_t *tramp) { /* Set up the sigreturn trampoline: li r0,sigret; sc. */ - if (sigret) { - __put_user(0x38000000 | sigret, &tramp[0]); - __put_user(0x44000002, &tramp[1]); - } + __put_user(0x38000000 | sigret, &tramp[0]); + __put_user(0x44000002, &tramp[1]); } static void restore_user_regs(CPUPPCState *env, @@ -320,6 +315,7 @@ static void restore_user_regs(CPUPPCState *env, { target_ulong save_r2 = 0; target_ulong msr; + target_ulong xer; target_ulong ccr; int i; @@ -335,9 +331,11 @@ static void restore_user_regs(CPUPPCState *env, __get_user(env->nip, &frame->mc_gregs[TARGET_PT_NIP]); __get_user(env->ctr, &frame->mc_gregs[TARGET_PT_CTR]); __get_user(env->lr, &frame->mc_gregs[TARGET_PT_LNK]); - __get_user(env->xer, &frame->mc_gregs[TARGET_PT_XER]); - __get_user(ccr, &frame->mc_gregs[TARGET_PT_CCR]); + __get_user(xer, &frame->mc_gregs[TARGET_PT_XER]); + cpu_write_xer(env, xer); + + __get_user(ccr, &frame->mc_gregs[TARGET_PT_CCR]); for (i = 0; i < ARRAY_SIZE(env->crf); i++) { env->crf[i] = (ccr >> (32 - ((i + 1) * 4))) & 0xf; } @@ -438,12 +436,7 @@ void setup_frame(int sig, struct target_sigaction *ka, /* Save user regs. */ save_user_regs(env, &frame->mctx); - /* Construct the trampoline code on the stack. */ - encode_trampoline(TARGET_NR_sigreturn, (uint32_t *)&frame->mctx.tramp); - - /* The kernel checks for the presence of a VDSO here. We don't - emulate a vdso, so use a sigreturn system call. 
*/ - env->lr = (target_ulong) h2g(frame->mctx.tramp); + env->lr = default_sigreturn; /* Turn off all fp exceptions. */ env->fpscr = 0; @@ -479,7 +472,6 @@ void setup_rt_frame(int sig, struct target_sigaction *ka, target_sigset_t *set, CPUPPCState *env) { struct target_rt_sigframe *rt_sf; - uint32_t *trampptr = 0; struct target_mcontext *mctx = 0; target_ulong rt_sf_addr, newsp = 0; int i, err = 0; @@ -509,22 +501,17 @@ void setup_rt_frame(int sig, struct target_sigaction *ka, #if defined(TARGET_PPC64) mctx = &rt_sf->uc.tuc_sigcontext.mcontext; - trampptr = &rt_sf->trampoline[0]; sc = &rt_sf->uc.tuc_sigcontext; __put_user(h2g(mctx), &sc->regs); __put_user(sig, &sc->signal); #else mctx = &rt_sf->uc.tuc_mcontext; - trampptr = (uint32_t *)&rt_sf->uc.tuc_mcontext.tramp; #endif save_user_regs(env, mctx); - encode_trampoline(TARGET_NR_rt_sigreturn, trampptr); - /* The kernel checks for the presence of a VDSO here. We don't - emulate a vdso, so use a sigreturn system call. */ - env->lr = (target_ulong) h2g(trampptr); + env->lr = default_rt_sigreturn; /* Turn off all fp exceptions. */ env->fpscr = 0; @@ -722,3 +709,19 @@ abi_long do_swapcontext(CPUArchState *env, abi_ulong uold_ctx, return 0; } + +void setup_sigtramp(abi_ulong sigtramp_page) +{ + uint32_t *tramp = lock_user(VERIFY_WRITE, sigtramp_page, 2 * 8, 0); + assert(tramp != NULL); + +#ifdef TARGET_ARCH_HAS_SETUP_FRAME + default_sigreturn = sigtramp_page; + encode_trampoline(TARGET_NR_sigreturn, tramp + 0); +#endif + + default_rt_sigreturn = sigtramp_page + 8; + encode_trampoline(TARGET_NR_rt_sigreturn, tramp + 2); + + unlock_user(tramp, sigtramp_page, 2 * 8); +} diff --git a/linux-user/ppc/target_signal.h b/linux-user/ppc/target_signal.h index 72fcdd9bfa..82184ab8f2 100644 --- a/linux-user/ppc/target_signal.h +++ b/linux-user/ppc/target_signal.h @@ -24,4 +24,6 @@ typedef struct target_sigaltstack { #if !defined(TARGET_PPC64) #define TARGET_ARCH_HAS_SETUP_FRAME #endif +#define TARGET_ARCH_HAS_SIGTRAMP_PAGE 1 + #endif /* PPC_TARGET_SIGNAL_H */ diff --git a/linux-user/riscv/cpu_loop.c b/linux-user/riscv/cpu_loop.c index 9859a366e4..b301dac802 100644 --- a/linux-user/riscv/cpu_loop.c +++ b/linux-user/riscv/cpu_loop.c @@ -87,13 +87,6 @@ void cpu_loop(CPURISCVState *env) sigcode = TARGET_TRAP_BRKPT; sigaddr = env->pc; break; - case RISCV_EXCP_INST_PAGE_FAULT: - case RISCV_EXCP_LOAD_PAGE_FAULT: - case RISCV_EXCP_STORE_PAGE_FAULT: - signum = TARGET_SIGSEGV; - sigcode = TARGET_SEGV_MAPERR; - sigaddr = env->badaddr; - break; case RISCV_EXCP_SEMIHOST: env->gpr[xA0] = do_common_semihosting(cs); env->pc += 4; @@ -133,7 +126,7 @@ void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs) env->gpr[xSP] = regs->sp; env->elf_flags = info->elf_flags; - if ((env->misa & RVE) && !(env->elf_flags & EF_RISCV_RVE)) { + if ((env->misa_ext & RVE) && !(env->elf_flags & EF_RISCV_RVE)) { error_report("Incompatible ELF: RVE cpu requires RVE ABI binary"); exit(EXIT_FAILURE); } diff --git a/linux-user/riscv/signal.c b/linux-user/riscv/signal.c index f7f33bc90a..a0f9542ce3 100644 --- a/linux-user/riscv/signal.c +++ b/linux-user/riscv/signal.c @@ -47,7 +47,6 @@ struct target_ucontext { }; struct target_rt_sigframe { - uint32_t tramp[2]; /* not in kernel, which uses VDSO instead */ struct target_siginfo info; struct target_ucontext uc; }; @@ -105,12 +104,6 @@ static void setup_ucontext(struct target_ucontext *uc, setup_sigcontext(&uc->uc_mcontext, env); } -static inline void install_sigtramp(uint32_t *tramp) -{ - __put_user(0x08b00893, tramp + 0); /* li a7, 
139 = __NR_rt_sigreturn */ - __put_user(0x00000073, tramp + 1); /* ecall */ -} - void setup_rt_frame(int sig, struct target_sigaction *ka, target_siginfo_t *info, target_sigset_t *set, CPURISCVState *env) @@ -127,14 +120,13 @@ void setup_rt_frame(int sig, struct target_sigaction *ka, setup_ucontext(&frame->uc, env, set); tswap_siginfo(&frame->info, info); - install_sigtramp(frame->tramp); env->pc = ka->_sa_handler; env->gpr[xSP] = frame_addr; env->gpr[xA0] = sig; env->gpr[xA1] = frame_addr + offsetof(struct target_rt_sigframe, info); env->gpr[xA2] = frame_addr + offsetof(struct target_rt_sigframe, uc); - env->gpr[xRA] = frame_addr + offsetof(struct target_rt_sigframe, tramp); + env->gpr[xRA] = default_rt_sigreturn; return; @@ -203,3 +195,15 @@ badframe: force_sig(TARGET_SIGSEGV); return 0; } + +void setup_sigtramp(abi_ulong sigtramp_page) +{ + uint32_t *tramp = lock_user(VERIFY_WRITE, sigtramp_page, 8, 0); + assert(tramp != NULL); + + __put_user(0x08b00893, tramp + 0); /* li a7, 139 = __NR_rt_sigreturn */ + __put_user(0x00000073, tramp + 1); /* ecall */ + + default_rt_sigreturn = sigtramp_page; + unlock_user(tramp, sigtramp_page, 8); +} diff --git a/linux-user/riscv/target_signal.h b/linux-user/riscv/target_signal.h index f113ba9a55..3e36fddc9d 100644 --- a/linux-user/riscv/target_signal.h +++ b/linux-user/riscv/target_signal.h @@ -15,4 +15,6 @@ typedef struct target_sigaltstack { #include "../generic/signal.h" +#define TARGET_ARCH_HAS_SIGTRAMP_PAGE 1 + #endif /* RISCV_TARGET_SIGNAL_H */ diff --git a/linux-user/s390x/cpu_loop.c b/linux-user/s390x/cpu_loop.c index 69b69981f6..d089c8417e 100644 --- a/linux-user/s390x/cpu_loop.c +++ b/linux-user/s390x/cpu_loop.c @@ -24,8 +24,6 @@ #include "cpu_loop-common.h" #include "signal-common.h" -/* s390x masks the fault address it reports in si_addr for SIGSEGV and SIGBUS */ -#define S390X_FAIL_ADDR_MASK -4096LL static int get_pgm_data_si_code(int dxc_code) { @@ -111,12 +109,13 @@ void cpu_loop(CPUS390XState *env) n = TARGET_ILL_ILLOPC; goto do_signal_pc; case PGM_PROTECTION: + force_sig_fault(TARGET_SIGSEGV, TARGET_SEGV_ACCERR, + env->__excp_addr); + break; case PGM_ADDRESSING: - sig = TARGET_SIGSEGV; - /* XXX: check env->error_code */ - n = TARGET_SEGV_MAPERR; - addr = env->__excp_addr & S390X_FAIL_ADDR_MASK; - goto do_signal; + force_sig_fault(TARGET_SIGSEGV, TARGET_SEGV_MAPERR, + env->__excp_addr); + break; case PGM_EXECUTE: case PGM_SPECIFICATION: case PGM_SPECIAL_OP: diff --git a/linux-user/s390x/signal.c b/linux-user/s390x/signal.c index 80f34086d7..676b948147 100644 --- a/linux-user/s390x/signal.c +++ b/linux-user/s390x/signal.c @@ -68,7 +68,6 @@ typedef struct { target_sigregs sregs; int signo; target_sigregs_ext sregs_ext; - uint16_t retcode; } sigframe; #define TARGET_UC_VXRS 2 @@ -85,7 +84,6 @@ struct target_ucontext { typedef struct { uint8_t callee_used_stack[__SIGNAL_FRAMESIZE]; - uint16_t retcode; struct target_siginfo info; struct target_ucontext uc; } rt_sigframe; @@ -209,9 +207,7 @@ void setup_frame(int sig, struct target_sigaction *ka, if (ka->sa_flags & TARGET_SA_RESTORER) { restorer = ka->sa_restorer; } else { - restorer = frame_addr + offsetof(sigframe, retcode); - __put_user(S390_SYSCALL_OPCODE | TARGET_NR_sigreturn, - &frame->retcode); + restorer = default_sigreturn; } /* Set up registers for signal handler */ @@ -262,9 +258,7 @@ void setup_rt_frame(int sig, struct target_sigaction *ka, if (ka->sa_flags & TARGET_SA_RESTORER) { restorer = ka->sa_restorer; } else { - restorer = frame_addr + offsetof(typeof(*frame), retcode); - 
__put_user(S390_SYSCALL_OPCODE | TARGET_NR_rt_sigreturn, - &frame->retcode); + restorer = default_rt_sigreturn; } /* Create siginfo on the signal stack. */ @@ -405,3 +399,17 @@ long do_rt_sigreturn(CPUS390XState *env) unlock_user_struct(frame, frame_addr, 0); return -TARGET_QEMU_ESIGRETURN; } + +void setup_sigtramp(abi_ulong sigtramp_page) +{ + uint16_t *tramp = lock_user(VERIFY_WRITE, sigtramp_page, 2 + 2, 0); + assert(tramp != NULL); + + default_sigreturn = sigtramp_page; + __put_user(S390_SYSCALL_OPCODE | TARGET_NR_sigreturn, &tramp[0]); + + default_rt_sigreturn = sigtramp_page + 2; + __put_user(S390_SYSCALL_OPCODE | TARGET_NR_rt_sigreturn, &tramp[1]); + + unlock_user(tramp, sigtramp_page, 2 + 2); +} diff --git a/linux-user/s390x/target_signal.h b/linux-user/s390x/target_signal.h index bbfc464d44..64f5f42201 100644 --- a/linux-user/s390x/target_signal.h +++ b/linux-user/s390x/target_signal.h @@ -19,4 +19,6 @@ typedef struct target_sigaltstack { #include "../generic/signal.h" #define TARGET_ARCH_HAS_SETUP_FRAME +#define TARGET_ARCH_HAS_SIGTRAMP_PAGE 1 + #endif /* S390X_TARGET_SIGNAL_H */ diff --git a/linux-user/sh4/cpu_loop.c b/linux-user/sh4/cpu_loop.c index 65b8972e3c..ac9b01840c 100644 --- a/linux-user/sh4/cpu_loop.c +++ b/linux-user/sh4/cpu_loop.c @@ -65,14 +65,6 @@ void cpu_loop(CPUSH4State *env) info.si_code = TARGET_TRAP_BRKPT; queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info); break; - case 0xa0: - case 0xc0: - info.si_signo = TARGET_SIGSEGV; - info.si_errno = 0; - info.si_code = TARGET_SEGV_MAPERR; - info._sifields._sigfault._addr = env->tea; - queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info); - break; case EXCP_ATOMIC: cpu_exec_step_atomic(cs); arch_interrupt = false; diff --git a/linux-user/sh4/signal.c b/linux-user/sh4/signal.c index d70d744bef..faa869fb19 100644 --- a/linux-user/sh4/signal.c +++ b/linux-user/sh4/signal.c @@ -52,7 +52,6 @@ struct target_sigframe { struct target_sigcontext sc; target_ulong extramask[TARGET_NSIG_WORDS-1]; - uint16_t retcode[3]; }; @@ -68,7 +67,6 @@ struct target_rt_sigframe { struct target_siginfo info; struct target_ucontext uc; - uint16_t retcode[3]; }; @@ -190,15 +188,9 @@ void setup_frame(int sig, struct target_sigaction *ka, /* Set up to return from userspace. If provided, use a stub already in userspace. */ if (ka->sa_flags & TARGET_SA_RESTORER) { - regs->pr = (unsigned long) ka->sa_restorer; + regs->pr = ka->sa_restorer; } else { - /* Generate return code (system call to sigreturn) */ - abi_ulong retcode_addr = frame_addr + - offsetof(struct target_sigframe, retcode); - __put_user(MOVW(2), &frame->retcode[0]); - __put_user(TRAP_NOARG, &frame->retcode[1]); - __put_user((TARGET_NR_sigreturn), &frame->retcode[2]); - regs->pr = (unsigned long) retcode_addr; + regs->pr = default_sigreturn; } /* Set up registers for signal handler */ @@ -248,15 +240,9 @@ void setup_rt_frame(int sig, struct target_sigaction *ka, /* Set up to return from userspace. If provided, use a stub already in userspace. 
*/ if (ka->sa_flags & TARGET_SA_RESTORER) { - regs->pr = (unsigned long) ka->sa_restorer; + regs->pr = ka->sa_restorer; } else { - /* Generate return code (system call to sigreturn) */ - abi_ulong retcode_addr = frame_addr + - offsetof(struct target_rt_sigframe, retcode); - __put_user(MOVW(2), &frame->retcode[0]); - __put_user(TRAP_NOARG, &frame->retcode[1]); - __put_user((TARGET_NR_rt_sigreturn), &frame->retcode[2]); - regs->pr = (unsigned long) retcode_addr; + regs->pr = default_rt_sigreturn; } /* Set up registers for signal handler */ @@ -334,3 +320,21 @@ badframe: force_sig(TARGET_SIGSEGV); return -TARGET_QEMU_ESIGRETURN; } + +void setup_sigtramp(abi_ulong sigtramp_page) +{ + uint16_t *tramp = lock_user(VERIFY_WRITE, sigtramp_page, 2 * 6, 0); + assert(tramp != NULL); + + default_sigreturn = sigtramp_page; + __put_user(MOVW(2), &tramp[0]); + __put_user(TRAP_NOARG, &tramp[1]); + __put_user(TARGET_NR_sigreturn, &tramp[2]); + + default_rt_sigreturn = sigtramp_page + 6; + __put_user(MOVW(2), &tramp[3]); + __put_user(TRAP_NOARG, &tramp[4]); + __put_user(TARGET_NR_rt_sigreturn, &tramp[5]); + + unlock_user(tramp, sigtramp_page, 2 * 6); +} diff --git a/linux-user/sh4/target_signal.h b/linux-user/sh4/target_signal.h index d7309b7136..04069cba66 100644 --- a/linux-user/sh4/target_signal.h +++ b/linux-user/sh4/target_signal.h @@ -22,4 +22,6 @@ typedef struct target_sigaltstack { #include "../generic/signal.h" #define TARGET_ARCH_HAS_SETUP_FRAME +#define TARGET_ARCH_HAS_SIGTRAMP_PAGE 1 + #endif /* SH4_TARGET_SIGNAL_H */ diff --git a/linux-user/signal-common.h b/linux-user/signal-common.h index 79511becb4..7457f8025c 100644 --- a/linux-user/signal-common.h +++ b/linux-user/signal-common.h @@ -20,6 +20,12 @@ #ifndef SIGNAL_COMMON_H #define SIGNAL_COMMON_H +/* Fallback addresses into sigtramp page. */ +extern abi_ulong default_sigreturn; +extern abi_ulong default_rt_sigreturn; + +void setup_sigtramp(abi_ulong tramp_page); + int on_sig_stack(unsigned long sp); int sas_ss_flags(unsigned long sp); abi_ulong target_sigsp(abi_ulong sp, struct target_sigaction *ka); diff --git a/linux-user/signal.c b/linux-user/signal.c index 2038216455..81c45bfce9 100644 --- a/linux-user/signal.c +++ b/linux-user/signal.c @@ -19,6 +19,7 @@ #include "qemu/osdep.h" #include "qemu/bitops.h" #include "exec/gdbstub.h" +#include "hw/core/tcg-cpu-ops.h" #include #include @@ -29,12 +30,16 @@ #include "loader.h" #include "trace.h" #include "signal-common.h" +#include "host-signal.h" static struct target_sigaction sigact_table[TARGET_NSIG]; static void host_signal_handler(int host_signum, siginfo_t *info, void *puc); +/* Fallback addresses into sigtramp page. */ +abi_ulong default_sigreturn; +abi_ulong default_rt_sigreturn; /* * System includes define _NSIG as SIGRTMAX + 1, @@ -683,9 +688,38 @@ void force_sigsegv(int oldsig) } force_sig(TARGET_SIGSEGV); } - #endif +void cpu_loop_exit_sigsegv(CPUState *cpu, target_ulong addr, + MMUAccessType access_type, bool maperr, uintptr_t ra) +{ + const struct TCGCPUOps *tcg_ops = CPU_GET_CLASS(cpu)->tcg_ops; + + if (tcg_ops->record_sigsegv) { + tcg_ops->record_sigsegv(cpu, addr, access_type, maperr, ra); + } + + force_sig_fault(TARGET_SIGSEGV, + maperr ? 
TARGET_SEGV_MAPERR : TARGET_SEGV_ACCERR, + addr); + cpu->exception_index = EXCP_INTERRUPT; + cpu_loop_exit_restore(cpu, ra); +} + +void cpu_loop_exit_sigbus(CPUState *cpu, target_ulong addr, + MMUAccessType access_type, uintptr_t ra) +{ + const struct TCGCPUOps *tcg_ops = CPU_GET_CLASS(cpu)->tcg_ops; + + if (tcg_ops->record_sigbus) { + tcg_ops->record_sigbus(cpu, addr, access_type, ra); + } + + force_sig_fault(TARGET_SIGBUS, TARGET_BUS_ADRALN, addr); + cpu->exception_index = EXCP_INTERRUPT; + cpu_loop_exit_restore(cpu, ra); +} + /* abort execution with signal */ static void QEMU_NORETURN dump_core_and_abort(int target_sig) { @@ -766,41 +800,101 @@ static inline void rewind_if_in_safe_syscall(void *puc) } #endif -static void host_signal_handler(int host_signum, siginfo_t *info, - void *puc) +static void host_signal_handler(int host_sig, siginfo_t *info, void *puc) { CPUArchState *env = thread_cpu->env_ptr; CPUState *cpu = env_cpu(env); TaskState *ts = cpu->opaque; - - int sig; target_siginfo_t tinfo; ucontext_t *uc = puc; struct emulated_sigtable *k; + int guest_sig; + uintptr_t pc = 0; + bool sync_sig = false; - /* the CPU emulator uses some host signals to detect exceptions, - we forward to it some signals */ - if ((host_signum == SIGSEGV || host_signum == SIGBUS) - && info->si_code > 0) { - if (cpu_signal_handler(host_signum, info, puc)) - return; + /* + * Non-spoofed SIGSEGV and SIGBUS are synchronous, and need special + * handling wrt signal blocking and unwinding. + */ + if ((host_sig == SIGSEGV || host_sig == SIGBUS) && info->si_code > 0) { + MMUAccessType access_type; + uintptr_t host_addr; + abi_ptr guest_addr; + bool is_write; + + host_addr = (uintptr_t)info->si_addr; + + /* + * Convert forcefully to guest address space: addresses outside + * reserved_va are still valid to report via SEGV_MAPERR. + */ + guest_addr = h2g_nocheck(host_addr); + + pc = host_signal_pc(uc); + is_write = host_signal_write(info, uc); + access_type = adjust_signal_pc(&pc, is_write); + + if (host_sig == SIGSEGV) { + bool maperr = true; + + if (info->si_code == SEGV_ACCERR && h2g_valid(host_addr)) { + /* If this was a write to a TB protected page, restart. */ + if (is_write && + handle_sigsegv_accerr_write(cpu, &uc->uc_sigmask, + pc, guest_addr)) { + return; + } + + /* + * With reserved_va, the whole address space is PROT_NONE, + * which means that we may get ACCERR when we want MAPERR. + */ + if (page_get_flags(guest_addr) & PAGE_VALID) { + maperr = false; + } else { + info->si_code = SEGV_MAPERR; + } + } + + sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL); + cpu_loop_exit_sigsegv(cpu, guest_addr, access_type, maperr, pc); + } else { + sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL); + if (info->si_code == BUS_ADRALN) { + cpu_loop_exit_sigbus(cpu, guest_addr, access_type, pc); + } + } + + sync_sig = true; } /* get target signal number */ - sig = host_to_target_signal(host_signum); - if (sig < 1 || sig > TARGET_NSIG) + guest_sig = host_to_target_signal(host_sig); + if (guest_sig < 1 || guest_sig > TARGET_NSIG) { return; - trace_user_host_signal(env, host_signum, sig); + } + trace_user_host_signal(env, host_sig, guest_sig); + + host_to_target_siginfo_noswap(&tinfo, info); + k = &ts->sigtab[guest_sig - 1]; + k->info = tinfo; + k->pending = guest_sig; + ts->signal_pending = 1; + + /* + * For synchronous signals, unwind the cpu state to the faulting + * insn and then exit back to the main loop so that the signal + * is delivered immediately. 
+ */ + if (sync_sig) { + cpu->exception_index = EXCP_INTERRUPT; + cpu_loop_exit_restore(cpu, pc); + } rewind_if_in_safe_syscall(puc); - host_to_target_siginfo_noswap(&tinfo, info); - k = &ts->sigtab[sig - 1]; - k->info = tinfo; - k->pending = sig; - ts->signal_pending = 1; - - /* Block host signals until target signal handler entered. We + /* + * Block host signals until target signal handler entered. We * can't block SIGSEGV or SIGBUS while we're executing guest * code in case the guest code provokes one in the window between * now and it getting out to the main loop. Signals will be diff --git a/linux-user/sparc/cpu_loop.c b/linux-user/sparc/cpu_loop.c index ad29b4eb6a..0ba65e431c 100644 --- a/linux-user/sparc/cpu_loop.c +++ b/linux-user/sparc/cpu_loop.c @@ -219,17 +219,6 @@ void cpu_loop (CPUSPARCState *env) case TT_WIN_UNF: /* window underflow */ restore_window(env); break; - case TT_TFAULT: - case TT_DFAULT: - { - info.si_signo = TARGET_SIGSEGV; - info.si_errno = 0; - /* XXX: check env->error_code */ - info.si_code = TARGET_SEGV_MAPERR; - info._sifields._sigfault._addr = env->mmuregs[4]; - queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info); - } - break; #else case TT_SPILL: /* window overflow */ save_window(env); @@ -237,20 +226,6 @@ void cpu_loop (CPUSPARCState *env) case TT_FILL: /* window underflow */ restore_window(env); break; - case TT_TFAULT: - case TT_DFAULT: - { - info.si_signo = TARGET_SIGSEGV; - info.si_errno = 0; - /* XXX: check env->error_code */ - info.si_code = TARGET_SEGV_MAPERR; - if (trapnr == TT_DFAULT) - info._sifields._sigfault._addr = env->dmmu.mmuregs[4]; - else - info._sifields._sigfault._addr = cpu_tsptr(env)->tpc; - queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info); - } - break; #ifndef TARGET_ABI32 case 0x16e: flush_windows(env); diff --git a/linux-user/sparc/signal.c b/linux-user/sparc/signal.c index 3bc023d281..23e1e761de 100644 --- a/linux-user/sparc/signal.c +++ b/linux-user/sparc/signal.c @@ -242,6 +242,12 @@ static void restore_fpu(struct target_siginfo_fpu *fpu, CPUSPARCState *env) } #ifdef TARGET_ARCH_HAS_SETUP_FRAME +static void install_sigtramp(uint32_t *tramp, int syscall) +{ + __put_user(0x82102000u + syscall, &tramp[0]); /* mov syscall, %g1 */ + __put_user(0x91d02010u, &tramp[1]); /* t 0x10 */ +} + void setup_frame(int sig, struct target_sigaction *ka, target_sigset_t *set, CPUSPARCState *env) { @@ -291,13 +297,9 @@ void setup_frame(int sig, struct target_sigaction *ka, if (ka->ka_restorer) { env->regwptr[WREG_O7] = ka->ka_restorer; } else { - env->regwptr[WREG_O7] = sf_addr + - offsetof(struct target_signal_frame, insns) - 2 * 4; - - /* mov __NR_sigreturn, %g1 */ - __put_user(0x821020d8u, &sf->insns[0]); - /* t 0x10 */ - __put_user(0x91d02010u, &sf->insns[1]); + /* Not used, but retain for ABI compatibility. */ + install_sigtramp(sf->insns, TARGET_NR_sigreturn); + env->regwptr[WREG_O7] = default_sigreturn; } unlock_user(sf, sf_addr, sf_size); } @@ -358,13 +360,9 @@ void setup_rt_frame(int sig, struct target_sigaction *ka, if (ka->ka_restorer) { env->regwptr[WREG_O7] = ka->ka_restorer; } else { - env->regwptr[WREG_O7] = - sf_addr + offsetof(struct target_rt_signal_frame, insns) - 2 * 4; - - /* mov __NR_rt_sigreturn, %g1 */ - __put_user(0x82102065u, &sf->insns[0]); - /* t 0x10 */ - __put_user(0x91d02010u, &sf->insns[1]); + /* Not used, but retain for ABI compatibility. 
*/ + install_sigtramp(sf->insns, TARGET_NR_rt_sigreturn); + env->regwptr[WREG_O7] = default_rt_sigreturn; } #else env->regwptr[WREG_O7] = ka->ka_restorer; @@ -775,4 +773,18 @@ do_sigsegv: unlock_user_struct(ucp, ucp_addr, 1); force_sig(TARGET_SIGSEGV); } +#else +void setup_sigtramp(abi_ulong sigtramp_page) +{ + uint32_t *tramp = lock_user(VERIFY_WRITE, sigtramp_page, 2 * 8, 0); + assert(tramp != NULL); + + default_sigreturn = sigtramp_page; + install_sigtramp(tramp, TARGET_NR_sigreturn); + + default_rt_sigreturn = sigtramp_page + 8; + install_sigtramp(tramp + 2, TARGET_NR_rt_sigreturn); + + unlock_user(tramp, sigtramp_page, 2 * 8); +} #endif diff --git a/linux-user/sparc/target_signal.h b/linux-user/sparc/target_signal.h index 34f9a12519..e661ddd6ab 100644 --- a/linux-user/sparc/target_signal.h +++ b/linux-user/sparc/target_signal.h @@ -69,6 +69,10 @@ typedef struct target_sigaltstack { #ifdef TARGET_ABI32 #define TARGET_ARCH_HAS_SETUP_FRAME +#define TARGET_ARCH_HAS_SIGTRAMP_PAGE 1 +#else +/* For sparc64, use of KA_RESTORER is mandatory. */ +#define TARGET_ARCH_HAS_SIGTRAMP_PAGE 0 #endif /* bit-flags */ diff --git a/linux-user/x86_64/target_signal.h b/linux-user/x86_64/target_signal.h index 4ea74f20dd..4673c5a886 100644 --- a/linux-user/x86_64/target_signal.h +++ b/linux-user/x86_64/target_signal.h @@ -21,4 +21,7 @@ typedef struct target_sigaltstack { #include "../generic/signal.h" +/* For x86_64, use of SA_RESTORER is mandatory. */ +#define TARGET_ARCH_HAS_SIGTRAMP_PAGE 0 + #endif /* X86_64_TARGET_SIGNAL_H */ diff --git a/linux-user/xtensa/cpu_loop.c b/linux-user/xtensa/cpu_loop.c index 622afbcd34..a83490ab35 100644 --- a/linux-user/xtensa/cpu_loop.c +++ b/linux-user/xtensa/cpu_loop.c @@ -226,15 +226,6 @@ void cpu_loop(CPUXtensaState *env) queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info); break; - case LOAD_PROHIBITED_CAUSE: - case STORE_PROHIBITED_CAUSE: - info.si_signo = TARGET_SIGSEGV; - info.si_errno = 0; - info.si_code = TARGET_SEGV_ACCERR; - info._sifields._sigfault._addr = env->sregs[EXCVADDR]; - queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info); - break; - default: fprintf(stderr, "exccause = %d\n", env->sregs[EXCCAUSE]); g_assert_not_reached(); diff --git a/linux-user/xtensa/signal.c b/linux-user/xtensa/signal.c index 7a3bfb92ca..81572a5fc7 100644 --- a/linux-user/xtensa/signal.c +++ b/linux-user/xtensa/signal.c @@ -128,6 +128,29 @@ static int setup_sigcontext(struct target_rt_sigframe *frame, return 1; } +static void install_sigtramp(uint8_t *tramp) +{ +#ifdef TARGET_WORDS_BIGENDIAN + /* Generate instruction: MOVI a2, __NR_rt_sigreturn */ + __put_user(0x22, &tramp[0]); + __put_user(0x0a, &tramp[1]); + __put_user(TARGET_NR_rt_sigreturn, &tramp[2]); + /* Generate instruction: SYSCALL */ + __put_user(0x00, &tramp[3]); + __put_user(0x05, &tramp[4]); + __put_user(0x00, &tramp[5]); +#else + /* Generate instruction: MOVI a2, __NR_rt_sigreturn */ + __put_user(0x22, &tramp[0]); + __put_user(0xa0, &tramp[1]); + __put_user(TARGET_NR_rt_sigreturn, &tramp[2]); + /* Generate instruction: SYSCALL */ + __put_user(0x00, &tramp[3]); + __put_user(0x50, &tramp[4]); + __put_user(0x00, &tramp[5]); +#endif +} + void setup_rt_frame(int sig, struct target_sigaction *ka, target_siginfo_t *info, target_sigset_t *set, CPUXtensaState *env) @@ -164,26 +187,9 @@ void setup_rt_frame(int sig, struct target_sigaction *ka, if (ka->sa_flags & TARGET_SA_RESTORER) { ra = ka->sa_restorer; } else { - ra = frame_addr + offsetof(struct target_rt_sigframe, retcode); -#ifdef TARGET_WORDS_BIGENDIAN - /* Generate 
instruction: MOVI a2, __NR_rt_sigreturn */ - __put_user(0x22, &frame->retcode[0]); - __put_user(0x0a, &frame->retcode[1]); - __put_user(TARGET_NR_rt_sigreturn, &frame->retcode[2]); - /* Generate instruction: SYSCALL */ - __put_user(0x00, &frame->retcode[3]); - __put_user(0x05, &frame->retcode[4]); - __put_user(0x00, &frame->retcode[5]); -#else - /* Generate instruction: MOVI a2, __NR_rt_sigreturn */ - __put_user(0x22, &frame->retcode[0]); - __put_user(0xa0, &frame->retcode[1]); - __put_user(TARGET_NR_rt_sigreturn, &frame->retcode[2]); - /* Generate instruction: SYSCALL */ - __put_user(0x00, &frame->retcode[3]); - __put_user(0x50, &frame->retcode[4]); - __put_user(0x00, &frame->retcode[5]); -#endif + /* Not used, but retain for ABI compatibility. */ + install_sigtramp(frame->retcode); + ra = default_rt_sigreturn; } memset(env->regs, 0, sizeof(env->regs)); env->pc = ka->_sa_handler; @@ -264,3 +270,13 @@ badframe: force_sig(TARGET_SIGSEGV); return -TARGET_QEMU_ESIGRETURN; } + +void setup_sigtramp(abi_ulong sigtramp_page) +{ + uint8_t *tramp = lock_user(VERIFY_WRITE, sigtramp_page, 6, 0); + assert(tramp != NULL); + + default_rt_sigreturn = sigtramp_page; + install_sigtramp(tramp); + unlock_user(tramp, sigtramp_page, 6); +} diff --git a/linux-user/xtensa/target_signal.h b/linux-user/xtensa/target_signal.h index c60bf656f6..1c7ee73154 100644 --- a/linux-user/xtensa/target_signal.h +++ b/linux-user/xtensa/target_signal.h @@ -20,4 +20,6 @@ typedef struct target_sigaltstack { #include "../generic/signal.h" +#define TARGET_ARCH_HAS_SIGTRAMP_PAGE 1 + #endif diff --git a/meson b/meson index 776acd2a80..12f9f04ba0 160000 --- a/meson +++ b/meson @@ -1 +1 @@ -Subproject commit 776acd2a805c9b42b4f0375150977df42130317f +Subproject commit 12f9f04ba0decfda425dbbf9a501084c153a2d18 diff --git a/meson.build b/meson.build index e83a5602a3..e4995214c9 100644 --- a/meson.build +++ b/meson.build @@ -1,14 +1,10 @@ -project('qemu', ['c'], meson_version: '>=0.55.0', - default_options: ['warning_level=1', 'c_std=gnu11', 'cpp_std=gnu++11', 'b_colorout=auto'] + - (meson.version().version_compare('>=0.56.0') ? 
[ 'b_staticpic=false' ] : []), - version: run_command('head', meson.source_root() / 'VERSION').stdout().strip()) +project('qemu', ['c'], meson_version: '>=0.58.2', + default_options: ['warning_level=1', 'c_std=gnu11', 'cpp_std=gnu++11', 'b_colorout=auto', + 'b_staticpic=false'], + version: files('VERSION')) not_found = dependency('', required: false) -if meson.version().version_compare('>=0.56.0') - keyval = import('keyval') -else - keyval = import('unstable-keyval') -endif +keyval = import('keyval') ss = import('sourceset') fs = import('fs') @@ -44,19 +40,22 @@ config_host_data = configuration_data() genh = [] target_dirs = config_host['TARGET_DIRS'].split() -have_user = false +have_linux_user = false +have_bsd_user = false have_system = false foreach target : target_dirs - have_user = have_user or target.endswith('-user') + have_linux_user = have_linux_user or target.endswith('linux-user') + have_bsd_user = have_bsd_user or target.endswith('bsd-user') have_system = have_system or target.endswith('-softmmu') endforeach +have_user = have_linux_user or have_bsd_user have_tools = 'CONFIG_TOOLS' in config_host have_block = have_system or have_tools python = import('python').find_installation() supported_oses = ['windows', 'freebsd', 'netbsd', 'openbsd', 'darwin', 'sunos', 'linux'] -supported_cpus = ['ppc', 'ppc64', 's390x', 'riscv32', 'riscv64', 'x86', 'x86_64', +supported_cpus = ['ppc', 'ppc64', 's390x', 'riscv', 'x86', 'x86_64', 'arm', 'aarch64', 'mips', 'mips64', 'sparc', 'sparc64'] cpu = host_machine.cpu_family() @@ -76,6 +75,12 @@ else kvm_targets = [] endif +kvm_targets_c = '""' +if not get_option('kvm').disabled() and targetos == 'linux' + kvm_targets_c = '"' + '" ,"'.join(kvm_targets) + '"' +endif +config_host_data.set('CONFIG_KVM_TARGETS', kvm_targets_c) + accelerator_targets = { 'CONFIG_KVM': kvm_targets } if cpu in ['aarch64'] @@ -106,14 +111,30 @@ if targetos != 'darwin' endif edk2_targets = [ 'arm-softmmu', 'aarch64-softmmu', 'i386-softmmu', 'x86_64-softmmu' ] -install_edk2_blobs = false -if get_option('install_blobs') - foreach target : target_dirs - install_edk2_blobs = install_edk2_blobs or target in edk2_targets - endforeach -endif +unpack_edk2_blobs = false +foreach target : edk2_targets + if target in target_dirs + bzip2 = find_program('bzip2', required: get_option('install_blobs')) + unpack_edk2_blobs = bzip2.found() + break + endif +endforeach -bzip2 = find_program('bzip2', required: install_edk2_blobs) +dtrace = not_found +stap = not_found +if 'dtrace' in get_option('trace_backends') + dtrace = find_program('dtrace', required: true) + stap = find_program('stap', required: false) + if stap.found() + # Workaround to avoid dtrace(1) producing a file with 'hidden' symbol + # visibility. Define STAP_SDT_V2 to produce 'default' symbol visibility + # instead. QEMU --enable-modules depends on this because the SystemTap + # semaphores are linked into the main binary and not the module's shared + # object. 
+ add_global_arguments('-DSTAP_SDT_V2', + native: false, language: ['c', 'cpp', 'objc']) + endif +endif ################## # Compiler flags # @@ -121,10 +142,37 @@ bzip2 = find_program('bzip2', required: install_edk2_blobs) # Specify linker-script with add_project_link_arguments so that it is not placed # within a linker --start-group/--end-group pair -if 'CONFIG_FUZZ' in config_host - add_project_link_arguments(['-Wl,-T,', - (meson.current_source_dir() / 'tests/qtest/fuzz/fork_fuzz.ld')], +if get_option('fuzzing') + add_project_link_arguments(['-Wl,-T,', + (meson.current_source_dir() / 'tests/qtest/fuzz/fork_fuzz.ld')], + native: false, language: ['c', 'cpp', 'objc']) + + # Specify a filter to only instrument code that is directly related to + # virtual-devices. + configure_file(output: 'instrumentation-filter', + input: 'scripts/oss-fuzz/instrumentation-filter-template', + copy: true) + add_global_arguments( + cc.get_supported_arguments('-fsanitize-coverage-allowlist=instrumentation-filter'), + native: false, language: ['c', 'cpp', 'objc']) + + if get_option('fuzzing_engine') == '' + # Add CFLAGS to tell clang to add fuzzer-related instrumentation to all the + # compiled code. To build non-fuzzer binaries with --enable-fuzzing, link + # everything with fsanitize=fuzzer-no-link. Otherwise, the linker will be + # unable to bind the fuzzer-related callbacks added by instrumentation. + add_global_arguments('-fsanitize=fuzzer-no-link', + native: false, language: ['c', 'cpp', 'objc']) + add_global_link_arguments('-fsanitize=fuzzer-no-link', native: false, language: ['c', 'cpp', 'objc']) + # For the actual fuzzer binaries, we need to link against the libfuzzer + # library. They need to be configurable, to support OSS-Fuzz + fuzz_exe_ldflags = ['-fsanitize=fuzzer'] + else + # LIB_FUZZING_ENGINE was set; assume we are running on OSS-Fuzz, and + # the needed CFLAGS have already been provided + fuzz_exe_ldflags = get_option('fuzzing_engine').split() + endif endif add_global_arguments(config_host['QEMU_CFLAGS'].split(), @@ -149,6 +197,10 @@ add_project_arguments('-iquote', '.', link_language = meson.get_external_property('link_language', 'cpp') if link_language == 'cpp' add_languages('cpp', required: true, native: false) + cxx = meson.get_compiler('cpp') + linker = cxx +else + linker = cc endif if host_machine.system() == 'darwin' add_languages('objc', required: false, native: false) @@ -167,6 +219,30 @@ endif # Target-specific checks and dependencies # ########################################### +if get_option('fuzzing') and get_option('fuzzing_engine') == '' and \ + not cc.links(''' + #include + #include + int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size); + int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size) { return 0; } + ''', + args: ['-Werror', '-fsanitize=fuzzer']) + error('Your compiler does not support -fsanitize=fuzzer') +endif + +if 'ftrace' in get_option('trace_backends') and targetos != 'linux' + error('ftrace is supported only on Linux') +endif +if 'syslog' in get_option('trace_backends') and not cc.compiles(''' + #include + int main(void) { + openlog("qemu", LOG_PID, LOG_DAEMON); + syslog(LOG_INFO, "configure"); + return 0; + }''') + error('syslog is not supported on this system') +endif + if targetos != 'linux' and get_option('mpath').enabled() error('Multipath is supported only on Linux') endif @@ -187,6 +263,7 @@ iokit = [] emulator_link_args = [] nvmm =not_found hvf = not_found +host_dsosuf = '.so' if targetos == 'windows' socket = cc.find_library('ws2_32') 
winmm = cc.find_library('winmm') @@ -195,9 +272,11 @@ if targetos == 'windows' version_res = win.compile_resources('version.rc', depend_files: files('pc-bios/qemu-nsis.ico'), include_directories: include_directories('.')) + host_dsosuf = '.dll' elif targetos == 'darwin' coref = dependency('appleframeworks', modules: 'CoreFoundation') iokit = dependency('appleframeworks', modules: 'IOKit', required: false) + host_dsosuf = '.dylib' elif targetos == 'sunos' socket = [cc.find_library('socket'), cc.find_library('nsl'), @@ -256,12 +335,12 @@ tcg_arch = config_host['ARCH'] if not get_option('tcg').disabled() if cpu not in supported_cpus if get_option('tcg_interpreter') - warning('Unsupported CPU @0@, will use TCG with TCI (experimental and slow)'.format(cpu)) + warning('Unsupported CPU @0@, will use TCG with TCI (slow)'.format(cpu)) else error('Unsupported CPU @0@, try --enable-tcg-interpreter'.format(cpu)) endif elif get_option('tcg_interpreter') - warning('Use of the TCG interpretor is not recommended on this host') + warning('Use of the TCG interpreter is not recommended on this host') warning('architecture. There is a native TCG execution backend available') warning('which provides substantially better performance and reliability.') warning('It is strongly recommended to remove the --enable-tcg-interpreter') @@ -272,14 +351,10 @@ if not get_option('tcg').disabled() tcg_arch = 'tci' elif config_host['ARCH'] == 'sparc64' tcg_arch = 'sparc' - elif config_host['ARCH'] == 's390x' - tcg_arch = 's390' elif config_host['ARCH'] in ['x86_64', 'x32'] tcg_arch = 'i386' elif config_host['ARCH'] == 'ppc64' tcg_arch = 'ppc' - elif config_host['ARCH'] in ['riscv32', 'riscv64'] - tcg_arch = 'riscv' endif add_project_arguments('-iquote', meson.current_source_dir() / 'tcg' / tcg_arch, language: ['c', 'cpp', 'objc']) @@ -327,17 +402,23 @@ if 'CONFIG_GIO' in config_host link_args: config_host['GIO_LIBS'].split()) endif lttng = not_found -if 'CONFIG_TRACE_UST' in config_host - lttng = declare_dependency(link_args: config_host['LTTNG_UST_LIBS'].split()) +if 'ust' in get_option('trace_backends') + lttng = dependency('lttng-ust', required: true, method: 'pkg-config', + kwargs: static_kwargs) endif pixman = not_found if have_system or have_tools pixman = dependency('pixman-1', required: have_system, version:'>=0.21.8', method: 'pkg-config', kwargs: static_kwargs) endif -libaio = cc.find_library('aio', required: false) zlib = dependency('zlib', required: true, kwargs: static_kwargs) +libaio = not_found +if not get_option('linux_aio').auto() or have_block + libaio = cc.find_library('aio', has_headers: ['libaio.h'], + required: get_option('linux_aio'), + kwargs: static_kwargs) +endif linux_io_uring = not_found if not get_option('linux_io_uring').auto() or have_block linux_io_uring = dependency('liburing', required: get_option('linux_io_uring'), @@ -430,35 +511,60 @@ else xkbcommon = dependency('xkbcommon', required: get_option('xkbcommon'), method: 'pkg-config', kwargs: static_kwargs) endif + vde = not_found -if config_host.has_key('CONFIG_VDE') - vde = declare_dependency(link_args: config_host['VDE_LIBS'].split()) +if not get_option('vde').auto() or have_system or have_tools + vde = cc.find_library('vdeplug', has_headers: ['libvdeplug.h'], + required: get_option('vde'), + kwargs: static_kwargs) endif +if vde.found() and not cc.links(''' + #include + int main(void) + { + struct vde_open_args a = {0, 0, 0}; + char s[] = ""; + vde_open(s, s, &a); + return 0; + }''', dependencies: vde) + vde = not_found + if 
get_option('cap_ng').enabled() + error('could not link libvdeplug') + else + warning('could not link libvdeplug, disabling') + endif +endif + pulse = not_found -if 'CONFIG_LIBPULSE' in config_host - pulse = declare_dependency(compile_args: config_host['PULSE_CFLAGS'].split(), - link_args: config_host['PULSE_LIBS'].split()) +if not get_option('pa').auto() or (targetos == 'linux' and have_system) + pulse = dependency('libpulse', required: get_option('pa'), + method: 'pkg-config', kwargs: static_kwargs) endif alsa = not_found -if 'CONFIG_ALSA' in config_host - alsa = declare_dependency(compile_args: config_host['ALSA_CFLAGS'].split(), - link_args: config_host['ALSA_LIBS'].split()) +if not get_option('alsa').auto() or (targetos == 'linux' and have_system) + alsa = dependency('alsa', required: get_option('alsa'), + method: 'pkg-config', kwargs: static_kwargs) endif jack = not_found -if 'CONFIG_LIBJACK' in config_host - jack = declare_dependency(link_args: config_host['JACK_LIBS'].split()) +if not get_option('jack').auto() or have_system + jack = dependency('jack', required: get_option('jack'), + method: 'pkg-config', kwargs: static_kwargs) +endif + +spice_protocol = not_found +if not get_option('spice_protocol').auto() or have_system + spice_protocol = dependency('spice-protocol', version: '>=0.12.3', + required: get_option('spice_protocol'), + method: 'pkg-config', kwargs: static_kwargs) endif spice = not_found -spice_headers = not_found -spice_protocol = not_found -if 'CONFIG_SPICE' in config_host - spice = declare_dependency(compile_args: config_host['SPICE_CFLAGS'].split(), - link_args: config_host['SPICE_LIBS'].split()) - spice_headers = declare_dependency(compile_args: config_host['SPICE_CFLAGS'].split()) -endif -if 'CONFIG_SPICE_PROTOCOL' in config_host - spice_protocol = declare_dependency(compile_args: config_host['SPICE_PROTOCOL_CFLAGS'].split()) +if not get_option('spice').auto() or have_system + spice = dependency('spice-server', version: '>=0.12.5', + required: get_option('spice'), + method: 'pkg-config', kwargs: static_kwargs) endif +spice_headers = spice.partial_dependency(compile_args: true, includes: true) + rt = cc.find_library('rt', required: false) libdl = not_found if 'CONFIG_PLUGIN' in config_host @@ -807,16 +913,39 @@ if liblzfse.found() and not cc.links(''' endif oss = not_found -if 'CONFIG_AUDIO_OSS' in config_host - oss = declare_dependency(link_args: config_host['OSS_LIBS'].split()) +if have_system and not get_option('oss').disabled() + if not cc.has_header('sys/soundcard.h') + # not found + elif targetos == 'netbsd' + oss = cc.find_library('ossaudio', required: get_option('oss'), + kwargs: static_kwargs) + else + oss = declare_dependency() + endif + + if not oss.found() + if get_option('oss').enabled() + error('OSS not found') + endif + endif endif dsound = not_found -if 'CONFIG_AUDIO_DSOUND' in config_host - dsound = declare_dependency(link_args: config_host['DSOUND_LIBS'].split()) +if not get_option('dsound').auto() or (targetos == 'windows' and have_system) + if cc.has_header('dsound.h') + dsound = declare_dependency(link_args: ['-lole32', '-ldxguid']) + endif + + if not dsound.found() + if get_option('dsound').enabled() + error('DirectSound not found') + endif + endif endif + coreaudio = not_found -if 'CONFIG_AUDIO_COREAUDIO' in config_host - coreaudio = declare_dependency(link_args: config_host['COREAUDIO_LIBS'].split()) +if not get_option('coreaudio').auto() or (targetos == 'darwin' and have_system) + coreaudio = dependency('appleframeworks', modules: 
'CoreAudio', + required: get_option('coreaudio')) endif opengl = not_found @@ -984,7 +1113,7 @@ if not get_option('snappy').auto() or have_system required: get_option('snappy'), kwargs: static_kwargs) endif -if snappy.found() and not cc.links(''' +if snappy.found() and not linker.links(''' #include int main(void) { snappy_max_compressed_length(4096); return 0; }''', dependencies: snappy) snappy = not_found @@ -1162,6 +1291,49 @@ if libbpf.found() and not cc.links(''' endif endif +################# +# config-host.h # +################# + +audio_drivers_selected = [] +if have_system + audio_drivers_available = { + 'alsa': alsa.found(), + 'coreaudio': coreaudio.found(), + 'dsound': dsound.found(), + 'jack': jack.found(), + 'oss': oss.found(), + 'pa': pulse.found(), + 'sdl': sdl.found(), + } + foreach k, v: audio_drivers_available + config_host_data.set('CONFIG_AUDIO_' + k.to_upper(), v) + endforeach + + # Default to native drivers first, OSS second, SDL third + audio_drivers_priority = \ + [ 'pa', 'coreaudio', 'dsound', 'oss' ] + \ + (targetos == 'linux' ? [] : [ 'sdl' ]) + audio_drivers_default = [] + foreach k: audio_drivers_priority + if audio_drivers_available[k] + audio_drivers_default += k + endif + endforeach + + foreach k: get_option('audio_drv_list') + if k == 'default' + audio_drivers_selected += audio_drivers_default + elif not audio_drivers_available[k] + error('Audio driver "@0@" not available.'.format(k)) + else + audio_drivers_selected += k + endif + endforeach +endif +config_host_data.set('CONFIG_AUDIO_DRIVERS', + '"' + '", "'.join(audio_drivers_selected) + '", ') + if get_option('cfi') cfi_flags=[] # Check for dependency on LTO @@ -1205,10 +1377,6 @@ endif have_host_block_device = (targetos != 'darwin' or cc.has_header('IOKit/storage/IOMedia.h')) -################# -# config-host.h # -################# - have_virtfs = (targetos == 'linux' and have_system and libattr.found() and @@ -1230,6 +1398,11 @@ elif get_option('virtfs').disabled() have_virtfs = false endif +foreach k : get_option('trace_backends') + config_host_data.set('CONFIG_TRACE_' + k.to_upper(), true) +endforeach +config_host_data.set_quoted('CONFIG_TRACE_FILE', get_option('trace_file')) + config_host_data.set_quoted('CONFIG_BINDIR', get_option('prefix') / get_option('bindir')) config_host_data.set_quoted('CONFIG_PREFIX', get_option('prefix')) config_host_data.set_quoted('CONFIG_QEMU_CONFDIR', get_option('prefix') / qemu_confdir) @@ -1246,6 +1419,8 @@ config_host_data.set_quoted('CONFIG_SYSCONFDIR', get_option('prefix') / get_opti config_host_data.set('CONFIG_ATTR', libattr.found()) config_host_data.set('CONFIG_BRLAPI', brlapi.found()) config_host_data.set('CONFIG_COCOA', cocoa.found()) +config_host_data.set('CONFIG_FUZZ', get_option('fuzzing')) +config_host_data.set('CONFIG_GCOV', get_option('b_coverage')) config_host_data.set('CONFIG_LIBUDEV', libudev.found()) config_host_data.set('CONFIG_LZO', lzo.found()) config_host_data.set('CONFIG_MPATH', mpathpersist.found()) @@ -1270,6 +1445,7 @@ config_host_data.set('CONFIG_EBPF', libbpf.found()) config_host_data.set('CONFIG_LIBDAXCTL', libdaxctl.found()) config_host_data.set('CONFIG_LIBISCSI', libiscsi.found()) config_host_data.set('CONFIG_LIBNFS', libnfs.found()) +config_host_data.set('CONFIG_LINUX_AIO', libaio.found()) config_host_data.set('CONFIG_LINUX_IO_URING', linux_io_uring.found()) config_host_data.set('CONFIG_LIBPMEM', libpmem.found()) config_host_data.set('CONFIG_RBD', rbd.found()) @@ -1278,6 +1454,7 @@ config_host_data.set('CONFIG_SDL_IMAGE', 
sdl_image.found()) config_host_data.set('CONFIG_SECCOMP', seccomp.found()) config_host_data.set('CONFIG_SNAPPY', snappy.found()) config_host_data.set('CONFIG_USB_LIBUSB', libusb.found()) +config_host_data.set('CONFIG_VDE', vde.found()) config_host_data.set('CONFIG_VHOST_USER_BLK_SERVER', have_vhost_user_blk_server) config_host_data.set('CONFIG_VNC', vnc.found()) config_host_data.set('CONFIG_VNC_JPEG', jpeg.found()) @@ -1298,6 +1475,8 @@ config_host_data.set('CONFIG_STATX', has_statx) config_host_data.set('CONFIG_ZSTD', zstd.found()) config_host_data.set('CONFIG_FUSE', fuse.found()) config_host_data.set('CONFIG_FUSE_LSEEK', fuse_lseek.found()) +config_host_data.set('CONFIG_SPICE_PROTOCOL', spice_protocol.found()) +config_host_data.set('CONFIG_SPICE', spice.found()) config_host_data.set('CONFIG_X11', x11.found()) config_host_data.set('CONFIG_CFI', get_option('cfi')) config_host_data.set('QEMU_VERSION', '"@0@"'.format(meson.project_version())) @@ -1305,7 +1484,9 @@ config_host_data.set('QEMU_VERSION_MAJOR', meson.project_version().split('.')[0] config_host_data.set('QEMU_VERSION_MINOR', meson.project_version().split('.')[1]) config_host_data.set('QEMU_VERSION_MICRO', meson.project_version().split('.')[2]) +config_host_data.set_quoted('CONFIG_HOST_DSOSUF', host_dsosuf) config_host_data.set('HAVE_HOST_BLOCK_DEVICE', have_host_block_device) +config_host_data.set('HOST_WORDS_BIGENDIAN', host_machine.endian() == 'big') # has_header config_host_data.set('CONFIG_EPOLL', cc.has_header('sys/epoll.h')) @@ -1337,6 +1518,12 @@ config_host_data.set('HAVE_COPY_FILE_RANGE', cc.has_function('copy_file_range')) config_host_data.set('HAVE_OPENPTY', cc.has_function('openpty', dependencies: util)) config_host_data.set('HAVE_STRCHRNUL', cc.has_function('strchrnul')) config_host_data.set('HAVE_SYSTEM_FUNCTION', cc.has_function('system', prefix: '#include ')) +if rdma.found() + config_host_data.set('HAVE_IBV_ADVISE_MR', + cc.has_function('ibv_advise_mr', + args: config_host['RDMA_LIBS'].split(), + prefix: '#include ')) +endif # has_header_symbol config_host_data.set('CONFIG_BYTESWAP_H', @@ -1360,6 +1547,8 @@ config_host_data.set('CONFIG_INOTIFY', cc.has_header_symbol('sys/inotify.h', 'inotify_init')) config_host_data.set('CONFIG_INOTIFY1', cc.has_header_symbol('sys/inotify.h', 'inotify_init1')) +config_host_data.set('CONFIG_IOVEC', + cc.has_header_symbol('sys/uio.h', 'struct iovec')) config_host_data.set('CONFIG_MACHINE_BSWAP_H', cc.has_header_symbol('machine/bswap.h', 'bswap32', prefix: '''#include @@ -1374,6 +1563,8 @@ config_host_data.set('HAVE_OPTRESET', cc.has_header_symbol('getopt.h', 'optreset')) config_host_data.set('HAVE_UTMPX', cc.has_header_symbol('utmpx.h', 'struct utmpx')) +config_host_data.set('HAVE_IPPROTO_MPTCP', + cc.has_header_symbol('netinet/in.h', 'IPPROTO_MPTCP')) # has_member config_host_data.set('HAVE_SIGEV_NOTIFY_THREAD_ID', @@ -1423,11 +1614,33 @@ config_host_data.set('CONFIG_POSIX_MADVISE', cc.links(gnu_source_prefix + ''' #include #include int main(void) { return posix_madvise(NULL, 0, POSIX_MADV_DONTNEED); }''')) + +config_host_data.set('CONFIG_PTHREAD_SETNAME_NP_W_TID', cc.links(''' + #include + + static void *f(void *p) { return NULL; } + int main(void) + { + pthread_t thread; + pthread_create(&thread, 0, f, 0); + pthread_setname_np(thread, "QEMU"); + return 0; + }''', dependencies: threads)) +config_host_data.set('CONFIG_PTHREAD_SETNAME_NP_WO_TID', cc.links(''' + #include + + static void *f(void *p) { pthread_setname_np("QEMU"); return NULL; } + int main(void) + { + pthread_t thread; 
+ pthread_create(&thread, 0, f, 0); + return 0; + }''', dependencies: threads)) + config_host_data.set('CONFIG_SIGNALFD', cc.links(gnu_source_prefix + ''' - #include - #include - #include - int main(void) { return syscall(SYS_signalfd, -1, NULL, _NSIG / 8); }''')) + #include + #include + int main(void) { return signalfd(-1, NULL, SFD_CLOEXEC); }''')) config_host_data.set('CONFIG_SPLICE', cc.links(gnu_source_prefix + ''' #include #include @@ -1441,6 +1654,47 @@ config_host_data.set('CONFIG_SPLICE', cc.links(gnu_source_prefix + ''' return 0; }''')) +config_host_data.set('HAVE_MLOCKALL', cc.links(gnu_source_prefix + ''' + #include + int main(int argc, char *argv[]) { + return mlockall(MCL_FUTURE); + }''')) + +have_l2tpv3 = false +if not get_option('l2tpv3').disabled() and have_system + have_l2tpv3 = (cc.has_header_symbol('sys/socket.h', 'struct mmsghdr') + and cc.has_header('linux/ip.h')) +endif +config_host_data.set('CONFIG_L2TPV3', have_l2tpv3) + +have_netmap = false +if not get_option('netmap').disabled() and have_system + have_netmap = cc.compiles(''' + #include + #include + #include + #include + #if (NETMAP_API < 11) || (NETMAP_API > 15) + #error + #endif + int main(void) { return 0; }''') + if not have_netmap and get_option('netmap').enabled() + error('Netmap headers not available') + endif +endif +config_host_data.set('CONFIG_NETMAP', have_netmap) + +# Work around a system header bug with some kernel/XFS header +# versions where they both try to define 'struct fsxattr': +# xfs headers will not try to redefine structs from linux headers +# if this macro is set. +config_host_data.set('HAVE_FSXATTR', cc.links(''' + #include ' + struct fsxattr foo; + int main(void) { + return 0; + }''')) + # Some versions of Mac OS X incorrectly define SIZE_MAX config_host_data.set('HAVE_BROKEN_SIZE_MAX', not cc.compiles(''' #include @@ -1449,10 +1703,52 @@ config_host_data.set('HAVE_BROKEN_SIZE_MAX', not cc.compiles(''' return printf("%zu", SIZE_MAX); }''', args: ['-Werror'])) +# See if 64-bit atomic operations are supported. +# Note that without __atomic builtins, we can only +# assume atomic loads/stores max at pointer size. 
+config_host_data.set('CONFIG_ATOMIC64', cc.links(''' + #include + int main(void) + { + uint64_t x = 0, y = 0; + y = __atomic_load_n(&x, __ATOMIC_RELAXED); + __atomic_store_n(&x, y, __ATOMIC_RELAXED); + __atomic_compare_exchange_n(&x, &y, x, 0, __ATOMIC_RELAXED, __ATOMIC_RELAXED); + __atomic_exchange_n(&x, y, __ATOMIC_RELAXED); + __atomic_fetch_add(&x, y, __ATOMIC_RELAXED); + return 0; + }''')) -ignored = ['CONFIG_QEMU_INTERP_PREFIX'] # actually per-target -arrays = ['CONFIG_AUDIO_DRIVERS', 'CONFIG_BDRV_RW_WHITELIST', 'CONFIG_BDRV_RO_WHITELIST'] -strings = ['HOST_DSOSUF', 'CONFIG_IASL'] +config_host_data.set('CONFIG_GETAUXVAL', cc.links(gnu_source_prefix + ''' + #include + int main(void) { + return getauxval(AT_HWCAP) == 0; + }''')) + +config_host_data.set('CONFIG_AF_VSOCK', cc.compiles(gnu_source_prefix + ''' + #include + #include + #include + #if !defined(AF_VSOCK) + # error missing AF_VSOCK flag + #endif + #include + int main(void) { + int sock, ret; + struct sockaddr_vm svm; + socklen_t len = sizeof(svm); + sock = socket(AF_VSOCK, SOCK_STREAM, 0); + ret = getpeername(sock, (struct sockaddr *)&svm, &len); + if ((ret == -1) && (errno == ENOTCONN)) { + return 0; + } + return -1; + }''')) + +ignored = ['CONFIG_QEMU_INTERP_PREFIX', # actually per-target + 'HAVE_GDB_BIN'] +arrays = ['CONFIG_BDRV_RW_WHITELIST', 'CONFIG_BDRV_RO_WHITELIST'] +strings = ['CONFIG_IASL'] foreach k, v: config_host if ignored.contains(k) # do nothing @@ -1464,11 +1760,8 @@ foreach k, v: config_host elif k == 'ARCH' config_host_data.set('HOST_' + v.to_upper(), 1) elif strings.contains(k) - if not k.startswith('CONFIG_') - k = 'CONFIG_' + k.to_upper() - endif config_host_data.set_quoted(k, v) - elif k.startswith('CONFIG_') or k.startswith('HAVE_') or k.startswith('HOST_') + elif k.startswith('CONFIG_') config_host_data.set(k, v == 'y' ? 1 : v) endif endforeach @@ -1519,8 +1812,9 @@ endif have_ivshmem = config_host_data.get('CONFIG_EVENTFD') host_kconfig = \ + (get_option('fuzzing') ? ['CONFIG_FUZZ=y'] : []) + \ ('CONFIG_TPM' in config_host ? ['CONFIG_TPM=y'] : []) + \ - ('CONFIG_SPICE' in config_host ? ['CONFIG_SPICE=y'] : []) + \ + (spice.found() ? ['CONFIG_SPICE=y'] : []) + \ (have_ivshmem ? ['CONFIG_IVSHMEM=y'] : []) + \ ('CONFIG_OPENGL' in config_host ? ['CONFIG_OPENGL=y'] : []) + \ (x11.found() ? 
['CONFIG_X11=y'] : []) + \ @@ -1971,26 +2265,26 @@ genh += configure_file(output: 'config-host.h', configuration: config_host_data) hxtool = find_program('scripts/hxtool') shaderinclude = find_program('scripts/shaderinclude.pl') qapi_gen = find_program('scripts/qapi-gen.py') -qapi_gen_depends = [ meson.source_root() / 'scripts/qapi/__init__.py', - meson.source_root() / 'scripts/qapi/commands.py', - meson.source_root() / 'scripts/qapi/common.py', - meson.source_root() / 'scripts/qapi/error.py', - meson.source_root() / 'scripts/qapi/events.py', - meson.source_root() / 'scripts/qapi/expr.py', - meson.source_root() / 'scripts/qapi/gen.py', - meson.source_root() / 'scripts/qapi/introspect.py', - meson.source_root() / 'scripts/qapi/parser.py', - meson.source_root() / 'scripts/qapi/schema.py', - meson.source_root() / 'scripts/qapi/source.py', - meson.source_root() / 'scripts/qapi/types.py', - meson.source_root() / 'scripts/qapi/visit.py', - meson.source_root() / 'scripts/qapi/common.py', - meson.source_root() / 'scripts/qapi-gen.py' +qapi_gen_depends = [ meson.current_source_dir() / 'scripts/qapi/__init__.py', + meson.current_source_dir() / 'scripts/qapi/commands.py', + meson.current_source_dir() / 'scripts/qapi/common.py', + meson.current_source_dir() / 'scripts/qapi/error.py', + meson.current_source_dir() / 'scripts/qapi/events.py', + meson.current_source_dir() / 'scripts/qapi/expr.py', + meson.current_source_dir() / 'scripts/qapi/gen.py', + meson.current_source_dir() / 'scripts/qapi/introspect.py', + meson.current_source_dir() / 'scripts/qapi/parser.py', + meson.current_source_dir() / 'scripts/qapi/schema.py', + meson.current_source_dir() / 'scripts/qapi/source.py', + meson.current_source_dir() / 'scripts/qapi/types.py', + meson.current_source_dir() / 'scripts/qapi/visit.py', + meson.current_source_dir() / 'scripts/qapi/common.py', + meson.current_source_dir() / 'scripts/qapi-gen.py' ] tracetool = [ python, files('scripts/tracetool.py'), - '--backend=' + config_host['TRACE_BACKENDS'] + '--backend=' + ','.join(get_option('trace_backends')) ] tracetool_depends = files( 'scripts/tracetool/backend/log.py', @@ -2061,6 +2355,7 @@ bsd_user_ss = ss.source_set() chardev_ss = ss.source_set() common_ss = ss.source_set() crypto_ss = ss.source_set() +hwcore_ss = ss.source_set() io_ss = ss.source_set() linux_user_ss = ss.source_set() qmp_ss = ss.source_set() @@ -2143,6 +2438,7 @@ if have_system 'hw/misc/macio', 'hw/net', 'hw/net/can', + 'hw/nubus', 'hw/nvme', 'hw/nvram', 'hw/pci', @@ -2154,6 +2450,7 @@ if have_system 'hw/s390x', 'hw/scsi', 'hw/sd', + 'hw/sh4', 'hw/sparc', 'hw/sparc64', 'hw/ssi', @@ -2306,12 +2603,9 @@ subdir('bsd-user') subdir('linux-user') subdir('ebpf') -common_ss.add(libbpf) - -bsd_user_ss.add(files('gdbstub.c')) specific_ss.add_all(when: 'CONFIG_BSD_USER', if_true: bsd_user_ss) -linux_user_ss.add(files('gdbstub.c', 'thunk.c')) +linux_user_ss.add(files('thunk.c')) specific_ss.add_all(when: 'CONFIG_LINUX_USER', if_true: linux_user_ss) # needed for fuzzing binaries @@ -2512,7 +2806,8 @@ libchardev = static_library('chardev', chardev_ss.sources() + genh, chardev = declare_dependency(link_whole: libchardev) -libhwcore = static_library('hwcore', sources: hwcore_files + genh, +hwcore_ss = hwcore_ss.apply(config_host, strict: false) +libhwcore = static_library('hwcore', sources: hwcore_ss.sources() + genh, name_suffix: 'fa', build_by_default: false, pic: 'AS_SHARED_LIB' in config_host) @@ -2647,23 +2942,23 @@ foreach target : target_dirs if target.endswith('-softmmu') execs = [{ 'name': 
'qemu-system-' + target_name, - 'gui': false, + 'win_subsystem': 'console', 'sources': files('softmmu/main.c'), 'dependencies': [] }] if targetos == 'windows' and (sdl.found() or gtk.found()) execs += [{ 'name': 'qemu-system-' + target_name + 'w', - 'gui': true, + 'win_subsystem': 'windows', 'sources': files('softmmu/main.c'), 'dependencies': [] }] endif - if config_host.has_key('CONFIG_FUZZ') + if get_option('fuzzing') specific_fuzz = specific_fuzz_ss.apply(config_target, strict: false) execs += [{ 'name': 'qemu-fuzz-' + target_name, - 'gui': false, + 'win_subsystem': 'console', 'sources': specific_fuzz.sources(), 'dependencies': specific_fuzz.dependencies(), }] @@ -2671,7 +2966,7 @@ foreach target : target_dirs else execs = [{ 'name': 'qemu-' + target_name, - 'gui': false, + 'win_subsystem': 'console', 'sources': [], 'dependencies': [] }] @@ -2691,7 +2986,7 @@ foreach target : target_dirs link_language: link_language, link_depends: [block_syms, qemu_syms] + exe.get('link_depends', []), link_args: link_args, - gui_app: exe['gui']) + win_subsystem: exe['win_subsystem']) else if 'AS_SHARED_LIB' in config_host emulator = shared_library(exe_name, exe['sources'], @@ -2741,7 +3036,7 @@ foreach target : target_dirs emulators += {exe['name']: emulator} endif - if 'CONFIG_TRACE_SYSTEMTAP' in config_host + if stap.found() foreach stp: [ {'ext': '.stp-build', 'fmt': 'stap', 'bin': meson.current_build_dir() / exe['name'], 'install': false}, {'ext': '.stp', 'fmt': 'stap', 'bin': get_option('prefix') / get_option('bindir') / exe['name'], 'install': true}, @@ -2894,13 +3189,13 @@ summary_info = {} summary_info += {'git': config_host['GIT']} summary_info += {'make': config_host['MAKE']} summary_info += {'python': '@0@ (version: @1@)'.format(python.full_path(), python.language_version())} -summary_info += {'sphinx-build': sphinx_build.found()} +summary_info += {'sphinx-build': sphinx_build} if config_host.has_key('HAVE_GDB_BIN') summary_info += {'gdb': config_host['HAVE_GDB_BIN']} endif summary_info += {'genisoimage': config_host['GENISOIMAGE']} if targetos == 'windows' and config_host.has_key('CONFIG_GUEST_AGENT') - summary_info += {'wixl': wixl.found() ? 
wixl.full_path() : false} + summary_info += {'wixl': wixl} endif if slirp_opt != 'disabled' and 'CONFIG_SLIRP_SMBD' in config_host summary_info += {'smbd': config_host['CONFIG_SMBD_COMMAND']} @@ -2918,13 +3213,13 @@ summary_info += {'module support': config_host.has_key('CONFIG_MODULES')} if config_host.has_key('CONFIG_MODULES') summary_info += {'alternative module path': config_host.has_key('CONFIG_MODULE_UPGRADES')} endif -summary_info += {'fuzzing support': config_host.has_key('CONFIG_FUZZ')} +summary_info += {'fuzzing support': get_option('fuzzing')} if have_system - summary_info += {'Audio drivers': config_host['CONFIG_AUDIO_DRIVERS']} + summary_info += {'Audio drivers': ' '.join(audio_drivers_selected)} endif -summary_info += {'Trace backends': config_host['TRACE_BACKENDS']} -if config_host['TRACE_BACKENDS'].split().contains('simple') - summary_info += {'Trace output file': config_host['CONFIG_TRACE_FILE'] + '-'} +summary_info += {'Trace backends': ','.join(get_option('trace_backends'))} +if 'simple' in get_option('trace_backends') + summary_info += {'Trace output file': get_option('trace_file') + '-'} endif summary_info += {'QOM debugging': config_host.has_key('CONFIG_QOM_CAST_DEBUG')} summary_info += {'vhost-kernel support': config_host.has_key('CONFIG_VHOST_KERNEL')} @@ -2991,7 +3286,7 @@ if get_option('cfi') summary_info += {'CFI debug support': get_option('cfi_debug')} endif summary_info += {'strip binaries': get_option('strip')} -summary_info += {'sparse': sparse.found() ? sparse.full_path() : false} +summary_info += {'sparse': sparse} summary_info += {'mingw32 support': targetos == 'windows'} # snarf the cross-compilation information for tests @@ -3029,7 +3324,7 @@ endif summary_info += {'TCG support': config_all.has_key('CONFIG_TCG')} if config_all.has_key('CONFIG_TCG') if get_option('tcg_interpreter') - summary_info += {'TCG backend': 'TCI (TCG with bytecode interpreter, experimental and slow)'} + summary_info += {'TCG backend': 'TCI (TCG with bytecode interpreter, slow)'} else summary_info += {'TCG backend': 'native (@0@)'.format(cpu)} endif @@ -3063,19 +3358,19 @@ if have_block summary_info += {'vvfat support': config_host.has_key('CONFIG_VVFAT')} summary_info += {'qed support': config_host.has_key('CONFIG_QED')} summary_info += {'parallels support': config_host.has_key('CONFIG_PARALLELS')} - summary_info += {'FUSE exports': fuse.found()} + summary_info += {'FUSE exports': fuse} endif summary(summary_info, bool_yn: true, section: 'Block layer support') # Crypto summary_info = {} summary_info += {'TLS priority': config_host['CONFIG_TLS_PRIORITY']} -summary_info += {'GNUTLS support': gnutls.found()} -summary_info += {'GNUTLS crypto': gnutls_crypto.found()} -# TODO: add back version -summary_info += {'libgcrypt': gcrypt.found()} -# TODO: add back version -summary_info += {'nettle': nettle.found()} +summary_info += {'GNUTLS support': gnutls} +if gnutls.found() + summary_info += {' GNUTLS crypto': gnutls_crypto.found()} +endif +summary_info += {'libgcrypt': gcrypt} +summary_info += {'nettle': nettle} if nettle.found() summary_info += {' XTS': xts != 'private'} endif @@ -3087,76 +3382,87 @@ summary(summary_info, bool_yn: true, section: 'Crypto') # Libraries summary_info = {} if targetos == 'darwin' - summary_info += {'Cocoa support': cocoa.found()} + summary_info += {'Cocoa support': cocoa} endif -# TODO: add back version -summary_info += {'SDL support': sdl.found()} -summary_info += {'SDL image support': sdl_image.found()} -# TODO: add back version -summary_info += {'GTK 
support': gtk.found()} -summary_info += {'pixman': pixman.found()} -# TODO: add back version -summary_info += {'VTE support': vte.found()} -# TODO: add back version -summary_info += {'slirp support': slirp_opt == 'disabled' ? false : slirp_opt} -summary_info += {'libtasn1': tasn1.found()} -summary_info += {'PAM': pam.found()} -summary_info += {'iconv support': iconv.found()} -summary_info += {'curses support': curses.found()} -# TODO: add back version -summary_info += {'virgl support': virgl.found()} -summary_info += {'curl support': curl.found()} -summary_info += {'Multipath support': mpathpersist.found()} -summary_info += {'VNC support': vnc.found()} +summary_info += {'SDL support': sdl} +summary_info += {'SDL image support': sdl_image} +summary_info += {'GTK support': gtk} +summary_info += {'pixman': pixman} +summary_info += {'VTE support': vte} +summary_info += {'slirp support': slirp_opt == 'internal' ? slirp_opt : slirp} +summary_info += {'libtasn1': tasn1} +summary_info += {'PAM': pam} +summary_info += {'iconv support': iconv} +summary_info += {'curses support': curses} +summary_info += {'virgl support': virgl} +summary_info += {'curl support': curl} +summary_info += {'Multipath support': mpathpersist} +summary_info += {'VNC support': vnc} if vnc.found() - summary_info += {'VNC SASL support': sasl.found()} - summary_info += {'VNC JPEG support': jpeg.found()} - summary_info += {'VNC PNG support': png.found()} + summary_info += {'VNC SASL support': sasl} + summary_info += {'VNC JPEG support': jpeg} + summary_info += {'VNC PNG support': png} endif -summary_info += {'brlapi support': brlapi.found()} -summary_info += {'vde support': config_host.has_key('CONFIG_VDE')} -summary_info += {'netmap support': config_host.has_key('CONFIG_NETMAP')} -summary_info += {'Linux AIO support': config_host.has_key('CONFIG_LINUX_AIO')} -summary_info += {'Linux io_uring support': linux_io_uring.found()} -summary_info += {'ATTR/XATTR support': libattr.found()} +if targetos not in ['darwin', 'haiku', 'windows'] + summary_info += {'OSS support': oss} +elif targetos == 'darwin' + summary_info += {'CoreAudio support': coreaudio} +elif targetos == 'windows' + summary_info += {'DirectSound support': dsound} +endif +if targetos == 'linux' + summary_info += {'ALSA support': alsa} + summary_info += {'PulseAudio support': pulse} +endif +summary_info += {'JACK support': jack} +summary_info += {'brlapi support': brlapi} +summary_info += {'vde support': vde} +summary_info += {'netmap support': have_netmap} +summary_info += {'l2tpv3 support': have_l2tpv3} +summary_info += {'Linux AIO support': libaio} +summary_info += {'Linux io_uring support': linux_io_uring} +summary_info += {'ATTR/XATTR support': libattr} summary_info += {'RDMA support': config_host.has_key('CONFIG_RDMA')} summary_info += {'PVRDMA support': config_host.has_key('CONFIG_PVRDMA')} summary_info += {'fdt support': fdt_opt == 'disabled' ? 
false : fdt_opt} -summary_info += {'libcap-ng support': libcap_ng.found()} -summary_info += {'bpf support': libbpf.found()} -# TODO: add back protocol and server version -summary_info += {'spice support': config_host.has_key('CONFIG_SPICE')} -summary_info += {'rbd support': rbd.found()} +summary_info += {'libcap-ng support': libcap_ng} +summary_info += {'bpf support': libbpf} +summary_info += {'spice protocol support': spice_protocol} +if spice_protocol.found() + summary_info += {' spice server support': spice} +endif +summary_info += {'rbd support': rbd} summary_info += {'xfsctl support': config_host.has_key('CONFIG_XFS')} -summary_info += {'smartcard support': cacard.found()} -summary_info += {'U2F support': u2f.found()} -summary_info += {'libusb': libusb.found()} -summary_info += {'usb net redir': usbredir.found()} +summary_info += {'smartcard support': cacard} +summary_info += {'U2F support': u2f} +summary_info += {'libusb': libusb} +summary_info += {'usb net redir': usbredir} summary_info += {'OpenGL support': config_host.has_key('CONFIG_OPENGL')} -summary_info += {'GBM': gbm.found()} -summary_info += {'libiscsi support': libiscsi.found()} -summary_info += {'libnfs support': libnfs.found()} +summary_info += {'GBM': gbm} +summary_info += {'libiscsi support': libiscsi} +summary_info += {'libnfs support': libnfs} if targetos == 'windows' if config_host.has_key('CONFIG_GUEST_AGENT') summary_info += {'QGA VSS support': config_host.has_key('CONFIG_QGA_VSS')} summary_info += {'QGA w32 disk info': config_host.has_key('CONFIG_QGA_NTDDSCSI')} endif endif -summary_info += {'seccomp support': seccomp.found()} -summary_info += {'GlusterFS support': glusterfs.found()} +summary_info += {'seccomp support': seccomp} +summary_info += {'GlusterFS support': glusterfs} summary_info += {'TPM support': config_host.has_key('CONFIG_TPM')} summary_info += {'libssh support': config_host.has_key('CONFIG_LIBSSH')} -summary_info += {'lzo support': lzo.found()} -summary_info += {'snappy support': snappy.found()} -summary_info += {'bzip2 support': libbzip2.found()} -summary_info += {'lzfse support': liblzfse.found()} -summary_info += {'zstd support': zstd.found()} +summary_info += {'lzo support': lzo} +summary_info += {'snappy support': snappy} +summary_info += {'bzip2 support': libbzip2} +summary_info += {'lzfse support': liblzfse} +summary_info += {'zstd support': zstd} summary_info += {'NUMA host support': config_host.has_key('CONFIG_NUMA')} -summary_info += {'libxml2': libxml2.found()} -summary_info += {'capstone': capstone_opt == 'disabled' ? false : capstone_opt} -summary_info += {'libpmem support': libpmem.found()} -summary_info += {'libdaxctl support': libdaxctl.found()} -summary_info += {'libudev': libudev.found()} +summary_info += {'libxml2': libxml2} +summary_info += {'capstone': capstone_opt == 'internal' ? capstone_opt : capstone} +summary_info += {'libpmem support': libpmem} +summary_info += {'libdaxctl support': libdaxctl} +summary_info += {'libudev': libudev} +# Dummy dependency, keep .found() summary_info += {'FUSE lseek': fuse_lseek.found()} summary(summary_info, bool_yn: true, section: 'Dependencies') diff --git a/meson_options.txt b/meson_options.txt index a9a9b8f4c6..411952bc91 100644 --- a/meson_options.txt +++ b/meson_options.txt @@ -1,3 +1,7 @@ +# These options do not correspond to a --enable/--disable-* option +# on the configure script command line. If you add more, list them in +# scripts/meson-buildoptions.py's SKIP_OPTIONS constant too. 
+ option('qemu_suffix', type : 'string', value: 'qemu', description: 'Suffix for QEMU data/modules/config directories (can be empty)') option('docdir', type : 'string', value : 'doc', @@ -6,11 +10,24 @@ option('qemu_firmwarepath', type : 'string', value : '', description: 'search PATH for firmware files') option('sphinx_build', type : 'string', value : '', description: 'Use specified sphinx-build [$sphinx_build] for building document (default to be empty)') - option('default_devices', type : 'boolean', value : true, description: 'Include a default selection of devices in emulators') +option('audio_drv_list', type: 'array', value: ['default'], + choices: ['alsa', 'coreaudio', 'default', 'dsound', 'jack', 'oss', 'pa', 'sdl'], + description: 'Set audio driver list') +option('fuzzing_engine', type : 'string', value : '', + description: 'fuzzing engine library for OSS-Fuzz') +option('trace_file', type: 'string', value: 'trace', + description: 'Trace file prefix for simple backend') + +# Everything else can be set via --enable/--disable-* option +# on the configure script command line. After adding an option +# here make sure to run "make update-buildoptions". + option('docs', type : 'feature', value : 'auto', description: 'Documentations build support') +option('fuzzing', type : 'boolean', value: false, + description: 'build fuzzing targets') option('gettext', type : 'feature', value : 'auto', description: 'Localization of the GTK+ user interface') option('install_blobs', type : 'boolean', value : true, @@ -42,7 +59,7 @@ option('xen_pci_passthrough', type: 'feature', value: 'auto', option('tcg', type: 'feature', value: 'auto', description: 'TCG support') option('tcg_interpreter', type: 'boolean', value: false, - description: 'TCG with bytecode interpreter (experimental and slow)') + description: 'TCG with bytecode interpreter (slow)') option('cfi', type: 'boolean', value: 'false', description: 'Control-Flow Integrity (CFI)') option('cfi_debug', type: 'boolean', value: 'false', @@ -94,6 +111,8 @@ option('libusb', type : 'feature', value : 'auto', description: 'libusb support for USB passthrough') option('libxml2', type : 'feature', value : 'auto', description: 'libxml2 support for Parallels image format') +option('linux_aio', type : 'feature', value : 'auto', + description: 'Linux AIO support') option('linux_io_uring', type : 'feature', value : 'auto', description: 'Linux io_uring support') option('lzfse', type : 'feature', value : 'auto', @@ -114,13 +133,23 @@ option('smartcard', type : 'feature', value : 'auto', description: 'CA smartcard emulation support') option('snappy', type : 'feature', value : 'auto', description: 'snappy compression support') +option('spice', type : 'feature', value : 'auto', + description: 'Spice server support') +option('spice_protocol', type : 'feature', value : 'auto', + description: 'Spice protocol support') option('u2f', type : 'feature', value : 'auto', description: 'U2F emulation support') option('usb_redir', type : 'feature', value : 'auto', description: 'libusbredir support') +option('l2tpv3', type : 'feature', value : 'auto', + description: 'l2tpv3 network backend support') +option('netmap', type : 'feature', value : 'auto', + description: 'netmap network backend support') +option('vde', type : 'feature', value : 'auto', + description: 'vde network backend support') option('virglrenderer', type : 'feature', value : 'auto', description: 'virgl rendering support') -option('vnc', type : 'feature', value : 'enabled', +option('vnc', type : 'feature', value : 
'auto', description: 'VNC server') option('vnc_jpeg', type : 'feature', value : 'auto', description: 'JPEG lossy compression for VNC server') @@ -139,6 +168,23 @@ option('fuse', type: 'feature', value: 'auto', option('fuse_lseek', type : 'feature', value : 'auto', description: 'SEEK_HOLE/SEEK_DATA support for FUSE exports') +option('trace_backends', type: 'array', value: ['log'], + choices: ['dtrace', 'ftrace', 'log', 'nop', 'simple', 'syslog', 'ust'], + description: 'Set available tracing backends') + +option('alsa', type: 'feature', value: 'auto', + description: 'ALSA sound support') +option('coreaudio', type: 'feature', value: 'auto', + description: 'CoreAudio sound support') +option('dsound', type: 'feature', value: 'auto', + description: 'DirectSound sound support') +option('jack', type: 'feature', value: 'auto', + description: 'JACK sound support') +option('oss', type: 'feature', value: 'auto', + description: 'OSS sound support') +option('pa', type: 'feature', value: 'auto', + description: 'PulseAudio sound support') + option('vhost_user_blk_server', type: 'feature', value: 'auto', description: 'build vhost-user-blk server') option('virtfs', type: 'feature', value: 'auto', diff --git a/migration/block-dirty-bitmap.c b/migration/block-dirty-bitmap.c index 35f5ef688d..9aba7d9c22 100644 --- a/migration/block-dirty-bitmap.c +++ b/migration/block-dirty-bitmap.c @@ -1215,7 +1215,10 @@ static int dirty_bitmap_save_setup(QEMUFile *f, void *opaque) { DBMSaveState *s = &((DBMState *)opaque)->save; SaveBitmapState *dbms = NULL; + + qemu_mutex_lock_iothread(); if (init_dirty_bitmap_migration(s) < 0) { + qemu_mutex_unlock_iothread(); return -1; } @@ -1223,7 +1226,7 @@ static int dirty_bitmap_save_setup(QEMUFile *f, void *opaque) send_bitmap_start(f, s, dbms); } qemu_put_bitmap_flags(f, DIRTY_BITMAP_MIG_FLAG_EOS); - + qemu_mutex_unlock_iothread(); return 0; } diff --git a/migration/colo.c b/migration/colo.c index 79fa1f6619..2415325262 100644 --- a/migration/colo.c +++ b/migration/colo.c @@ -152,7 +152,7 @@ static void primary_vm_do_failover(void) * kick COLO thread which might wait at * qemu_sem_wait(&s->colo_checkpoint_sem). */ - colo_checkpoint_notify(migrate_get_current()); + colo_checkpoint_notify(s); /* * Wake up COLO thread which may blocked in recv() or send(), @@ -205,7 +205,7 @@ void colo_do_failover(void) vm_stop_force_state(RUN_STATE_COLO); } - switch (get_colo_mode()) { + switch (last_colo_mode = get_colo_mode()) { case COLO_MODE_PRIMARY: primary_vm_do_failover(); break; @@ -459,6 +459,10 @@ static int colo_do_checkpoint_transaction(MigrationState *s, if (ret < 0) { goto out; } + + if (migrate_auto_converge()) { + mig_throttle_counter_reset(); + } /* * Only save VM's live state, which not including device state. 
* TODO: We may need a timeout mechanism to prevent COLO process @@ -530,8 +534,7 @@ static void colo_process_checkpoint(MigrationState *s) Error *local_err = NULL; int ret; - last_colo_mode = get_colo_mode(); - if (last_colo_mode != COLO_MODE_PRIMARY) { + if (get_colo_mode() != COLO_MODE_PRIMARY) { error_report("COLO mode must be COLO_MODE_PRIMARY"); return; } @@ -640,6 +643,7 @@ out: */ if (s->rp_state.from_dst_file) { qemu_fclose(s->rp_state.from_dst_file); + s->rp_state.from_dst_file = NULL; } } @@ -829,8 +833,7 @@ void *colo_process_incoming_thread(void *opaque) migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE, MIGRATION_STATUS_COLO); - last_colo_mode = get_colo_mode(); - if (last_colo_mode != COLO_MODE_SECONDARY) { + if (get_colo_mode() != COLO_MODE_SECONDARY) { error_report("COLO mode must be COLO_MODE_SECONDARY"); return NULL; } @@ -918,11 +921,6 @@ out: /* Hope this not to be too long to loop here */ qemu_sem_wait(&mis->colo_incoming_sem); qemu_sem_destroy(&mis->colo_incoming_sem); - /* Must be called after failover BH is completed */ - if (mis->to_src_file) { - qemu_fclose(mis->to_src_file); - mis->to_src_file = NULL; - } rcu_unregister_thread(); return NULL; diff --git a/migration/dirtyrate.c b/migration/dirtyrate.c index 320c56ba2c..d65e744af9 100644 --- a/migration/dirtyrate.c +++ b/migration/dirtyrate.c @@ -15,7 +15,9 @@ #include "qapi/error.h" #include "cpu.h" #include "exec/ramblock.h" +#include "exec/ram_addr.h" #include "qemu/rcu_queue.h" +#include "qemu/main-loop.h" #include "qapi/qapi-commands-migration.h" #include "ram.h" #include "trace.h" @@ -23,9 +25,26 @@ #include "monitor/hmp.h" #include "monitor/monitor.h" #include "qapi/qmp/qdict.h" +#include "sysemu/kvm.h" +#include "sysemu/runstate.h" +#include "exec/memory.h" + +/* + * total_dirty_pages is procted by BQL and is used + * to stat dirty pages during the period of two + * memory_global_dirty_log_sync + */ +uint64_t total_dirty_pages; + +typedef struct DirtyPageRecord { + uint64_t start_pages; + uint64_t end_pages; +} DirtyPageRecord; static int CalculatingState = DIRTY_RATE_STATUS_UNSTARTED; static struct DirtyRateStat DirtyStat; +static DirtyRateMeasureMode dirtyrate_mode = + DIRTY_RATE_MEASURE_MODE_PAGE_SAMPLING; static int64_t set_sample_page_period(int64_t msec, int64_t initial_time) { @@ -70,51 +89,94 @@ static int dirtyrate_set_state(int *state, int old_state, int new_state) static struct DirtyRateInfo *query_dirty_rate_info(void) { + int i; int64_t dirty_rate = DirtyStat.dirty_rate; struct DirtyRateInfo *info = g_malloc0(sizeof(DirtyRateInfo)); - - if (qatomic_read(&CalculatingState) == DIRTY_RATE_STATUS_MEASURED) { - info->has_dirty_rate = true; - info->dirty_rate = dirty_rate; - } + DirtyRateVcpuList *head = NULL, **tail = &head; info->status = CalculatingState; info->start_time = DirtyStat.start_time; info->calc_time = DirtyStat.calc_time; info->sample_pages = DirtyStat.sample_pages; + info->mode = dirtyrate_mode; + + if (qatomic_read(&CalculatingState) == DIRTY_RATE_STATUS_MEASURED) { + info->has_dirty_rate = true; + info->dirty_rate = dirty_rate; + + if (dirtyrate_mode == DIRTY_RATE_MEASURE_MODE_DIRTY_RING) { + /* + * set sample_pages with 0 to indicate page sampling + * isn't enabled + **/ + info->sample_pages = 0; + info->has_vcpu_dirty_rate = true; + for (i = 0; i < DirtyStat.dirty_ring.nvcpu; i++) { + DirtyRateVcpu *rate = g_malloc0(sizeof(DirtyRateVcpu)); + rate->id = DirtyStat.dirty_ring.rates[i].id; + rate->dirty_rate = DirtyStat.dirty_ring.rates[i].dirty_rate; + QAPI_LIST_APPEND(tail, 
rate); + } + info->vcpu_dirty_rate = head; + } + + if (dirtyrate_mode == DIRTY_RATE_MEASURE_MODE_DIRTY_BITMAP) { + info->sample_pages = 0; + } + } trace_query_dirty_rate_info(DirtyRateStatus_str(CalculatingState)); return info; } -static void init_dirtyrate_stat(int64_t start_time, int64_t calc_time, - uint64_t sample_pages) +static void init_dirtyrate_stat(int64_t start_time, + struct DirtyRateConfig config) { - DirtyStat.total_dirty_samples = 0; - DirtyStat.total_sample_count = 0; - DirtyStat.total_block_mem_MB = 0; DirtyStat.dirty_rate = -1; DirtyStat.start_time = start_time; - DirtyStat.calc_time = calc_time; - DirtyStat.sample_pages = sample_pages; + DirtyStat.calc_time = config.sample_period_seconds; + DirtyStat.sample_pages = config.sample_pages_per_gigabytes; + + switch (config.mode) { + case DIRTY_RATE_MEASURE_MODE_PAGE_SAMPLING: + DirtyStat.page_sampling.total_dirty_samples = 0; + DirtyStat.page_sampling.total_sample_count = 0; + DirtyStat.page_sampling.total_block_mem_MB = 0; + break; + case DIRTY_RATE_MEASURE_MODE_DIRTY_RING: + DirtyStat.dirty_ring.nvcpu = -1; + DirtyStat.dirty_ring.rates = NULL; + break; + default: + break; + } +} + +static void cleanup_dirtyrate_stat(struct DirtyRateConfig config) +{ + /* last calc-dirty-rate qmp use dirty ring mode */ + if (dirtyrate_mode == DIRTY_RATE_MEASURE_MODE_DIRTY_RING) { + free(DirtyStat.dirty_ring.rates); + DirtyStat.dirty_ring.rates = NULL; + } } static void update_dirtyrate_stat(struct RamblockDirtyInfo *info) { - DirtyStat.total_dirty_samples += info->sample_dirty_count; - DirtyStat.total_sample_count += info->sample_pages_count; + DirtyStat.page_sampling.total_dirty_samples += info->sample_dirty_count; + DirtyStat.page_sampling.total_sample_count += info->sample_pages_count; /* size of total pages in MB */ - DirtyStat.total_block_mem_MB += (info->ramblock_pages * - TARGET_PAGE_SIZE) >> 20; + DirtyStat.page_sampling.total_block_mem_MB += (info->ramblock_pages * + TARGET_PAGE_SIZE) >> 20; } static void update_dirtyrate(uint64_t msec) { uint64_t dirtyrate; - uint64_t total_dirty_samples = DirtyStat.total_dirty_samples; - uint64_t total_sample_count = DirtyStat.total_sample_count; - uint64_t total_block_mem_MB = DirtyStat.total_block_mem_MB; + uint64_t total_dirty_samples = DirtyStat.page_sampling.total_dirty_samples; + uint64_t total_sample_count = DirtyStat.page_sampling.total_sample_count; + uint64_t total_block_mem_MB = DirtyStat.page_sampling.total_block_mem_MB; dirtyrate = total_dirty_samples * total_block_mem_MB * 1000 / (total_sample_count * msec); @@ -327,21 +389,183 @@ static bool compare_page_hash_info(struct RamblockDirtyInfo *info, update_dirtyrate_stat(block_dinfo); } - if (DirtyStat.total_sample_count == 0) { + if (DirtyStat.page_sampling.total_sample_count == 0) { return false; } return true; } -static void calculate_dirtyrate(struct DirtyRateConfig config) +static inline void record_dirtypages(DirtyPageRecord *dirty_pages, + CPUState *cpu, bool start) +{ + if (start) { + dirty_pages[cpu->cpu_index].start_pages = cpu->dirty_pages; + } else { + dirty_pages[cpu->cpu_index].end_pages = cpu->dirty_pages; + } +} + +static void dirtyrate_global_dirty_log_start(void) +{ + qemu_mutex_lock_iothread(); + memory_global_dirty_log_start(GLOBAL_DIRTY_DIRTY_RATE); + qemu_mutex_unlock_iothread(); +} + +static void dirtyrate_global_dirty_log_stop(void) +{ + qemu_mutex_lock_iothread(); + memory_global_dirty_log_sync(); + memory_global_dirty_log_stop(GLOBAL_DIRTY_DIRTY_RATE); + qemu_mutex_unlock_iothread(); +} + +static int64_t 
do_calculate_dirtyrate_vcpu(DirtyPageRecord dirty_pages) +{ + uint64_t memory_size_MB; + int64_t time_s; + uint64_t increased_dirty_pages = + dirty_pages.end_pages - dirty_pages.start_pages; + + memory_size_MB = (increased_dirty_pages * TARGET_PAGE_SIZE) >> 20; + time_s = DirtyStat.calc_time; + + return memory_size_MB / time_s; +} + +static inline void record_dirtypages_bitmap(DirtyPageRecord *dirty_pages, + bool start) +{ + if (start) { + dirty_pages->start_pages = total_dirty_pages; + } else { + dirty_pages->end_pages = total_dirty_pages; + } +} + +static void do_calculate_dirtyrate_bitmap(DirtyPageRecord dirty_pages) +{ + DirtyStat.dirty_rate = do_calculate_dirtyrate_vcpu(dirty_pages); +} + +static inline void dirtyrate_manual_reset_protect(void) +{ + RAMBlock *block = NULL; + + WITH_RCU_READ_LOCK_GUARD() { + RAMBLOCK_FOREACH_MIGRATABLE(block) { + memory_region_clear_dirty_bitmap(block->mr, 0, + block->used_length); + } + } +} + +static void calculate_dirtyrate_dirty_bitmap(struct DirtyRateConfig config) +{ + int64_t msec = 0; + int64_t start_time; + DirtyPageRecord dirty_pages; + + qemu_mutex_lock_iothread(); + memory_global_dirty_log_start(GLOBAL_DIRTY_DIRTY_RATE); + + /* + * 1'round of log sync may return all 1 bits with + * KVM_DIRTY_LOG_INITIALLY_SET enable + * skip it unconditionally and start dirty tracking + * from 2'round of log sync + */ + memory_global_dirty_log_sync(); + + /* + * reset page protect manually and unconditionally. + * this make sure kvm dirty log be cleared if + * KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE cap is enabled. + */ + dirtyrate_manual_reset_protect(); + qemu_mutex_unlock_iothread(); + + record_dirtypages_bitmap(&dirty_pages, true); + + start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); + DirtyStat.start_time = start_time / 1000; + + msec = config.sample_period_seconds * 1000; + msec = set_sample_page_period(msec, start_time); + DirtyStat.calc_time = msec / 1000; + + /* + * dirtyrate_global_dirty_log_stop do two things. + * 1. fetch dirty bitmap from kvm + * 2. 
stop dirty tracking + */ + dirtyrate_global_dirty_log_stop(); + + record_dirtypages_bitmap(&dirty_pages, false); + + do_calculate_dirtyrate_bitmap(dirty_pages); +} + +static void calculate_dirtyrate_dirty_ring(struct DirtyRateConfig config) +{ + CPUState *cpu; + int64_t msec = 0; + int64_t start_time; + uint64_t dirtyrate = 0; + uint64_t dirtyrate_sum = 0; + DirtyPageRecord *dirty_pages; + int nvcpu = 0; + int i = 0; + + CPU_FOREACH(cpu) { + nvcpu++; + } + + dirty_pages = malloc(sizeof(*dirty_pages) * nvcpu); + + DirtyStat.dirty_ring.nvcpu = nvcpu; + DirtyStat.dirty_ring.rates = malloc(sizeof(DirtyRateVcpu) * nvcpu); + + dirtyrate_global_dirty_log_start(); + + CPU_FOREACH(cpu) { + record_dirtypages(dirty_pages, cpu, true); + } + + start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); + DirtyStat.start_time = start_time / 1000; + + msec = config.sample_period_seconds * 1000; + msec = set_sample_page_period(msec, start_time); + DirtyStat.calc_time = msec / 1000; + + dirtyrate_global_dirty_log_stop(); + + CPU_FOREACH(cpu) { + record_dirtypages(dirty_pages, cpu, false); + } + + for (i = 0; i < DirtyStat.dirty_ring.nvcpu; i++) { + dirtyrate = do_calculate_dirtyrate_vcpu(dirty_pages[i]); + trace_dirtyrate_do_calculate_vcpu(i, dirtyrate); + + DirtyStat.dirty_ring.rates[i].id = i; + DirtyStat.dirty_ring.rates[i].dirty_rate = dirtyrate; + dirtyrate_sum += dirtyrate; + } + + DirtyStat.dirty_rate = dirtyrate_sum; + free(dirty_pages); +} + +static void calculate_dirtyrate_sample_vm(struct DirtyRateConfig config) { struct RamblockDirtyInfo *block_dinfo = NULL; int block_count = 0; int64_t msec = 0; int64_t initial_time; - rcu_register_thread(); rcu_read_lock(); initial_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); if (!record_ramblock_hash_info(&block_dinfo, config, &block_count)) { @@ -364,16 +588,26 @@ static void calculate_dirtyrate(struct DirtyRateConfig config) out: rcu_read_unlock(); free_ramblock_dirty_info(block_dinfo, block_count); - rcu_unregister_thread(); +} + +static void calculate_dirtyrate(struct DirtyRateConfig config) +{ + if (config.mode == DIRTY_RATE_MEASURE_MODE_DIRTY_BITMAP) { + calculate_dirtyrate_dirty_bitmap(config); + } else if (config.mode == DIRTY_RATE_MEASURE_MODE_DIRTY_RING) { + calculate_dirtyrate_dirty_ring(config); + } else { + calculate_dirtyrate_sample_vm(config); + } + + trace_dirtyrate_calculate(DirtyStat.dirty_rate); } void *get_dirtyrate_thread(void *arg) { struct DirtyRateConfig config = *(struct DirtyRateConfig *)arg; int ret; - int64_t start_time; - int64_t calc_time; - uint64_t sample_pages; + rcu_register_thread(); ret = dirtyrate_set_state(&CalculatingState, DIRTY_RATE_STATUS_UNSTARTED, DIRTY_RATE_STATUS_MEASURING); @@ -382,11 +616,6 @@ void *get_dirtyrate_thread(void *arg) return NULL; } - start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME) / 1000; - calc_time = config.sample_period_seconds; - sample_pages = config.sample_pages_per_gigabytes; - init_dirtyrate_stat(start_time, calc_time, sample_pages); - calculate_dirtyrate(config); ret = dirtyrate_set_state(&CalculatingState, DIRTY_RATE_STATUS_MEASURING, @@ -394,15 +623,22 @@ void *get_dirtyrate_thread(void *arg) if (ret == -1) { error_report("change dirtyrate state failed."); } + + rcu_unregister_thread(); return NULL; } -void qmp_calc_dirty_rate(int64_t calc_time, bool has_sample_pages, - int64_t sample_pages, Error **errp) +void qmp_calc_dirty_rate(int64_t calc_time, + bool has_sample_pages, + int64_t sample_pages, + bool has_mode, + DirtyRateMeasureMode mode, + Error **errp) { static struct 
DirtyRateConfig config; QemuThread thread; int ret; + int64_t start_time; /* * If the dirty rate is already being measured, don't attempt to start. @@ -419,6 +655,15 @@ void qmp_calc_dirty_rate(int64_t calc_time, bool has_sample_pages, return; } + if (!has_mode) { + mode = DIRTY_RATE_MEASURE_MODE_PAGE_SAMPLING; + } + + if (has_sample_pages && mode == DIRTY_RATE_MEASURE_MODE_DIRTY_RING) { + error_setg(errp, "either sample-pages or dirty-ring can be specified."); + return; + } + if (has_sample_pages) { if (!is_sample_pages_valid(sample_pages)) { error_setg(errp, "sample-pages is out of range[%d, %d].", @@ -430,6 +675,19 @@ void qmp_calc_dirty_rate(int64_t calc_time, bool has_sample_pages, sample_pages = DIRTYRATE_DEFAULT_SAMPLE_PAGES; } + /* + * dirty ring mode only works when kvm dirty ring is enabled. + * on the contrary, dirty bitmap mode is not. + */ + if (((mode == DIRTY_RATE_MEASURE_MODE_DIRTY_RING) && + !kvm_dirty_ring_enabled()) || + ((mode == DIRTY_RATE_MEASURE_MODE_DIRTY_BITMAP) && + kvm_dirty_ring_enabled())) { + error_setg(errp, "mode %s is not enabled, use other method instead.", + DirtyRateMeasureMode_str(mode)); + return; + } + /* * Init calculation state as unstarted. */ @@ -442,6 +700,19 @@ void qmp_calc_dirty_rate(int64_t calc_time, bool has_sample_pages, config.sample_period_seconds = calc_time; config.sample_pages_per_gigabytes = sample_pages; + config.mode = mode; + + cleanup_dirtyrate_stat(config); + + /* + * update dirty rate mode so that we can figure out what mode has + * been used in last calculation + **/ + dirtyrate_mode = mode; + + start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME) / 1000; + init_dirtyrate_stat(start_time, config); + qemu_thread_create(&thread, "get_dirtyrate", get_dirtyrate_thread, (void *)&config, QEMU_THREAD_DETACHED); } @@ -463,12 +734,24 @@ void hmp_info_dirty_rate(Monitor *mon, const QDict *qdict) info->sample_pages); monitor_printf(mon, "Period: %"PRIi64" (sec)\n", info->calc_time); + monitor_printf(mon, "Mode: %s\n", + DirtyRateMeasureMode_str(info->mode)); monitor_printf(mon, "Dirty rate: "); if (info->has_dirty_rate) { monitor_printf(mon, "%"PRIi64" (MB/s)\n", info->dirty_rate); + if (info->has_vcpu_dirty_rate) { + DirtyRateVcpuList *rate, *head = info->vcpu_dirty_rate; + for (rate = head; rate != NULL; rate = rate->next) { + monitor_printf(mon, "vcpu[%"PRIi64"], Dirty rate: %"PRIi64 + " (MB/s)\n", rate->value->id, + rate->value->dirty_rate); + } + } } else { monitor_printf(mon, "(not ready)\n"); } + + qapi_free_DirtyRateVcpuList(info->vcpu_dirty_rate); g_free(info); } @@ -477,6 +760,9 @@ void hmp_calc_dirty_rate(Monitor *mon, const QDict *qdict) int64_t sec = qdict_get_try_int(qdict, "second", 0); int64_t sample_pages = qdict_get_try_int(qdict, "sample_pages_per_GB", -1); bool has_sample_pages = (sample_pages != -1); + bool dirty_ring = qdict_get_try_bool(qdict, "dirty_ring", false); + bool dirty_bitmap = qdict_get_try_bool(qdict, "dirty_bitmap", false); + DirtyRateMeasureMode mode = DIRTY_RATE_MEASURE_MODE_PAGE_SAMPLING; Error *err = NULL; if (!sec) { @@ -484,7 +770,20 @@ void hmp_calc_dirty_rate(Monitor *mon, const QDict *qdict) return; } - qmp_calc_dirty_rate(sec, has_sample_pages, sample_pages, &err); + if (dirty_ring && dirty_bitmap) { + monitor_printf(mon, "Either dirty ring or dirty bitmap " + "can be specified!\n"); + return; + } + + if (dirty_bitmap) { + mode = DIRTY_RATE_MEASURE_MODE_DIRTY_BITMAP; + } else if (dirty_ring) { + mode = DIRTY_RATE_MEASURE_MODE_DIRTY_RING; + } + + qmp_calc_dirty_rate(sec, has_sample_pages, 
sample_pages, true, + mode, &err); if (err) { hmp_handle_error(mon, err); return; diff --git a/migration/dirtyrate.h b/migration/dirtyrate.h index e1fd29089e..69d4c5b865 100644 --- a/migration/dirtyrate.h +++ b/migration/dirtyrate.h @@ -43,6 +43,7 @@ struct DirtyRateConfig { uint64_t sample_pages_per_gigabytes; /* sample pages per GB */ int64_t sample_period_seconds; /* time duration between two sampling */ + DirtyRateMeasureMode mode; /* mode of dirtyrate measurement */ }; /* @@ -58,17 +59,29 @@ struct RamblockDirtyInfo { uint32_t *hash_result; /* array of hash result for sampled pages */ }; +typedef struct SampleVMStat { + uint64_t total_dirty_samples; /* total dirty sampled page */ + uint64_t total_sample_count; /* total sampled pages */ + uint64_t total_block_mem_MB; /* size of total sampled pages in MB */ +} SampleVMStat; + +typedef struct VcpuStat { + int nvcpu; /* number of vcpu */ + DirtyRateVcpu *rates; /* array of dirty rate for each vcpu */ +} VcpuStat; + /* * Store calculation statistics for each measure. */ struct DirtyRateStat { - uint64_t total_dirty_samples; /* total dirty sampled page */ - uint64_t total_sample_count; /* total sampled pages */ - uint64_t total_block_mem_MB; /* size of total sampled pages in MB */ int64_t dirty_rate; /* dirty rate in MB/s */ int64_t start_time; /* calculation start time in units of second */ int64_t calc_time; /* time duration of two sampling in units of second */ uint64_t sample_pages; /* sample pages per GB */ + union { + SampleVMStat page_sampling; + VcpuStat dirty_ring; + }; }; void *get_dirtyrate_thread(void *arg); diff --git a/migration/migration.c b/migration/migration.c index bb909781b7..abaf6f9e3d 100644 --- a/migration/migration.c +++ b/migration/migration.c @@ -215,8 +215,11 @@ void migration_object_init(void) dirty_bitmap_mig_init(); } -void migration_cancel(void) +void migration_cancel(const Error *error) { + if (error) { + migrate_set_error(current_migration, error); + } migrate_fd_cancel(current_migration); } @@ -226,7 +229,7 @@ void migration_shutdown(void) * Cancel the current migration - that will (eventually) * stop the migration using this structure */ - migration_cancel(); + migration_cancel(NULL); object_unref(OBJECT(current_migration)); /* @@ -391,7 +394,7 @@ int migrate_send_rp_message_req_pages(MigrationIncomingState *mis, int migrate_send_rp_req_pages(MigrationIncomingState *mis, RAMBlock *rb, ram_addr_t start, uint64_t haddr) { - void *aligned = (void *)(uintptr_t)(haddr & (-qemu_ram_pagesize(rb))); + void *aligned = (void *)(uintptr_t)ROUND_DOWN(haddr, qemu_ram_pagesize(rb)); bool received = false; WITH_QEMU_LOCK_GUARD(&mis->page_request_mutex) { @@ -453,10 +456,12 @@ static void qemu_start_incoming_migration(const char *uri, Error **errp) { const char *p = NULL; + migrate_protocol_allow_multifd(false); /* reset it anyway */ qapi_event_send_migration(MIGRATION_STATUS_SETUP); if (strstart(uri, "tcp:", &p) || strstart(uri, "unix:", NULL) || strstart(uri, "vsock:", NULL)) { + migrate_protocol_allow_multifd(true); socket_start_incoming_migration(p ? 
p : uri, errp); #ifdef CONFIG_RDMA } else if (strstart(uri, "rdma:", &p)) { @@ -585,8 +590,10 @@ static void process_incoming_migration_co(void *opaque) mis->have_colo_incoming_thread = true; qemu_coroutine_yield(); + qemu_mutex_unlock_iothread(); /* Wait checkpoint incoming thread exit before free resource */ qemu_thread_join(&mis->colo_incoming_thread); + qemu_mutex_lock_iothread(); /* We hold the global iothread lock, so it is safe here */ colo_release_ram_cache(); } @@ -1235,6 +1242,14 @@ static bool migrate_caps_check(bool *cap_list, } } + /* incoming side only */ + if (runstate_check(RUN_STATE_INMIGRATE) && + !migrate_multifd_is_allowed() && + cap_list[MIGRATION_CAPABILITY_MULTIFD]) { + error_setg(errp, "multifd is not supported by current protocol"); + return false; + } + return true; } @@ -2039,6 +2054,20 @@ void migrate_init(MigrationState *s) s->threshold_size = 0; } +int migrate_add_blocker_internal(Error *reason, Error **errp) +{ + /* Snapshots are similar to migrations, so check RUN_STATE_SAVE_VM too. */ + if (runstate_check(RUN_STATE_SAVE_VM) || !migration_is_idle()) { + error_propagate_prepend(errp, error_copy(reason), + "disallowing migration blocker " + "(migration/snapshot in progress) for: "); + return -EBUSY; + } + + migration_blockers = g_slist_prepend(migration_blockers, reason); + return 0; +} + int migrate_add_blocker(Error *reason, Error **errp) { if (only_migratable) { @@ -2048,15 +2077,7 @@ int migrate_add_blocker(Error *reason, Error **errp) return -EACCES; } - if (migration_is_idle()) { - migration_blockers = g_slist_prepend(migration_blockers, reason); - return 0; - } - - error_propagate_prepend(errp, error_copy(reason), - "disallowing migration blocker " - "(migration in progress) for: "); - return -EBUSY; + return migrate_add_blocker_internal(reason, errp); } void migrate_del_blocker(Error *reason) @@ -2252,10 +2273,11 @@ static bool migrate_prepare(MigrationState *s, bool blk, bool blk_inc, migrate_init(s); /* - * set ram_counters memory to zero for a + * set ram_counters compression_counters memory to zero for a * new migration */ memset(&ram_counters, 0, sizeof(ram_counters)); + memset(&compression_counters, 0, sizeof(compression_counters)); return true; } @@ -2280,9 +2302,11 @@ void qmp_migrate(const char *uri, bool has_blk, bool blk, } } + migrate_protocol_allow_multifd(false); if (strstart(uri, "tcp:", &p) || strstart(uri, "unix:", NULL) || strstart(uri, "vsock:", NULL)) { + migrate_protocol_allow_multifd(true); socket_start_outgoing_migration(s, p ? p : uri, &local_err); #ifdef CONFIG_RDMA } else if (strstart(uri, "rdma:", &p)) { @@ -2316,7 +2340,7 @@ void qmp_migrate(const char *uri, bool has_blk, bool blk, void qmp_migrate_cancel(Error **errp) { - migration_cancel(); + migration_cancel(NULL); } void qmp_migrate_continue(MigrationStatus state, Error **errp) @@ -2619,8 +2643,8 @@ static void migrate_handle_rp_req_pages(MigrationState *ms, const char* rbname, * Since we currently insist on matching page sizes, just sanity check * we're being asked for whole host pages. 
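The hunk that follows replaces open-coded mask arithmetic with QEMU's alignment helpers. As a standalone illustration of the equivalence both forms rely on for power-of-two page sizes, here is a small sketch; IS_ALIGNED and ALIGN_DOWN are local stand-ins written for this note (not QEMU's macros), and the 4096-byte page size is an assumption:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Local stand-ins for the alignment helpers used by the patch. */
#define IS_ALIGNED(n, m)  (((n) % (m)) == 0)
#define ALIGN_DOWN(n, m)  ((n) - ((n) % (m)))

int main(void)
{
    uint64_t page_size = 4096;   /* assumed host page size */
    uint64_t addrs[] = { 0x1000, 0x1234, 0x7ffff7ff1000ULL };

    for (size_t i = 0; i < sizeof(addrs) / sizeof(addrs[0]); i++) {
        uint64_t a = addrs[i];
        /* For power-of-two sizes the modulo test matches the mask test... */
        assert(IS_ALIGNED(a, page_size) == !(a & (page_size - 1)));
        /* ...and rounding down via modulo matches masking off the low bits. */
        assert(ALIGN_DOWN(a, page_size) == (a & ~(page_size - 1)));
        printf("%#llx -> aligned=%d, rounded down to %#llx\n",
               (unsigned long long)a, (int)IS_ALIGNED(a, page_size),
               (unsigned long long)ALIGN_DOWN(a, page_size));
    }
    return 0;
}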
*/ - if (start & (our_host_ps - 1) || - (len & (our_host_ps - 1))) { + if (!QEMU_IS_ALIGNED(start, our_host_ps) || + !QEMU_IS_ALIGNED(len, our_host_ps)) { error_report("%s: Misaligned page request, start: " RAM_ADDR_FMT " len: %zd", __func__, start, len); mark_source_rp_bad(ms); @@ -3168,7 +3192,10 @@ static void migration_completion(MigrationState *s) } else if (s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) { trace_migration_completion_postcopy_end(); + qemu_mutex_lock_iothread(); qemu_savevm_state_complete_postcopy(s->to_dst_file); + qemu_mutex_unlock_iothread(); + trace_migration_completion_postcopy_end_after_complete(); } else if (s->state == MIGRATION_STATUS_CANCELLING) { goto fail; @@ -3601,7 +3628,9 @@ static void migration_iteration_finish(MigrationState *s) case MIGRATION_STATUS_CANCELLED: case MIGRATION_STATUS_CANCELLING: if (s->vm_was_running) { - vm_start(); + if (!runstate_check(RUN_STATE_SHUTDOWN)) { + vm_start(); + } } else { if (runstate_check(RUN_STATE_FINISH_MIGRATE)) { runstate_set(RUN_STATE_POSTMIGRATE); diff --git a/migration/migration.h b/migration/migration.h index 7a5aa8c2fd..8130b703eb 100644 --- a/migration/migration.h +++ b/migration/migration.h @@ -388,7 +388,7 @@ int foreach_not_ignored_block(RAMBlockIterFunc func, void *opaque); void migration_make_urgent_request(void); void migration_consume_urgent_request(void); bool migration_rate_limit(void); -void migration_cancel(void); +void migration_cancel(const Error *error); void populate_vfio_info(MigrationInfo *info); diff --git a/migration/multifd.c b/migration/multifd.c index 377da78f5b..7c9deb1921 100644 --- a/migration/multifd.c +++ b/migration/multifd.c @@ -531,7 +531,7 @@ void multifd_save_cleanup(void) { int i; - if (!migrate_use_multifd()) { + if (!migrate_use_multifd() || !migrate_multifd_is_allowed()) { return; } multifd_send_terminate_threads(NULL); @@ -546,6 +546,9 @@ void multifd_save_cleanup(void) MultiFDSendParams *p = &multifd_send_state->params[i]; Error *local_err = NULL; + if (p->registered_yank) { + migration_ioc_unregister_yank(p->c); + } socket_send_channel_destroy(p->c); p->c = NULL; qemu_mutex_destroy(&p->mutex); @@ -813,7 +816,8 @@ static bool multifd_channel_connect(MultiFDSendParams *p, return false; } } else { - /* update for tls qio channel */ + migration_ioc_register_yank(ioc); + p->registered_yank = true; p->c = ioc; qemu_thread_create(&p->thread, p->name, multifd_send_thread, p, QEMU_THREAD_JOINABLE); @@ -864,6 +868,17 @@ cleanup: multifd_new_send_channel_cleanup(p, sioc, local_err); } +static bool migrate_allow_multifd = true; +void migrate_protocol_allow_multifd(bool allow) +{ + migrate_allow_multifd = allow; +} + +bool migrate_multifd_is_allowed(void) +{ + return migrate_allow_multifd; +} + int multifd_save_setup(Error **errp) { int thread_count; @@ -874,6 +889,11 @@ int multifd_save_setup(Error **errp) if (!migrate_use_multifd()) { return 0; } + if (!migrate_multifd_is_allowed()) { + error_setg(errp, "multifd is not supported by current protocol"); + return -1; + } + s = migrate_get_current(); thread_count = migrate_multifd_channels(); multifd_send_state = g_malloc0(sizeof(*multifd_send_state)); @@ -967,7 +987,7 @@ int multifd_load_cleanup(Error **errp) { int i; - if (!migrate_use_multifd()) { + if (!migrate_use_multifd() || !migrate_multifd_is_allowed()) { return 0; } multifd_recv_terminate_threads(NULL); @@ -987,10 +1007,7 @@ int multifd_load_cleanup(Error **errp) for (i = 0; i < migrate_multifd_channels(); i++) { MultiFDRecvParams *p = &multifd_recv_state->params[i]; - if 
(OBJECT(p->c)->ref == 1) { - migration_ioc_unregister_yank(p->c); - } - + migration_ioc_unregister_yank(p->c); object_unref(OBJECT(p->c)); p->c = NULL; qemu_mutex_destroy(&p->mutex); @@ -1119,6 +1136,10 @@ int multifd_load_setup(Error **errp) if (!migrate_use_multifd()) { return 0; } + if (!migrate_multifd_is_allowed()) { + error_setg(errp, "multifd is not supported by current protocol"); + return -1; + } thread_count = migrate_multifd_channels(); multifd_recv_state = g_malloc0(sizeof(*multifd_recv_state)); multifd_recv_state->params = g_new0(MultiFDRecvParams, thread_count); diff --git a/migration/multifd.h b/migration/multifd.h index 8d6751f5ed..15c50ca0b2 100644 --- a/migration/multifd.h +++ b/migration/multifd.h @@ -13,6 +13,8 @@ #ifndef QEMU_MIGRATION_MULTIFD_H #define QEMU_MIGRATION_MULTIFD_H +bool migrate_multifd_is_allowed(void); +void migrate_protocol_allow_multifd(bool allow); int multifd_save_setup(Error **errp); void multifd_save_cleanup(void); int multifd_load_setup(Error **errp); @@ -85,6 +87,8 @@ typedef struct { bool running; /* should this thread finish */ bool quit; + /* is the yank function registered */ + bool registered_yank; /* thread has work to do */ int pending_job; /* array of pages to sent */ diff --git a/migration/postcopy-ram.c b/migration/postcopy-ram.c index 2e9697bdd2..d18b5d05b2 100644 --- a/migration/postcopy-ram.c +++ b/migration/postcopy-ram.c @@ -402,7 +402,7 @@ bool postcopy_ram_supported_by_host(MigrationIncomingState *mis) strerror(errno)); goto out; } - g_assert(((size_t)testarea & (pagesize - 1)) == 0); + g_assert(QEMU_PTR_IS_ALIGNED(testarea, pagesize)); reg_struct.range.start = (uintptr_t)testarea; reg_struct.range.len = pagesize; @@ -660,7 +660,7 @@ int postcopy_wake_shared(struct PostCopyFD *pcfd, struct uffdio_range range; int ret; trace_postcopy_wake_shared(client_addr, qemu_ram_get_idstr(rb)); - range.start = client_addr & ~(pagesize - 1); + range.start = ROUND_DOWN(client_addr, pagesize); range.len = pagesize; ret = ioctl(pcfd->fd, UFFDIO_WAKE, &range); if (ret) { @@ -671,6 +671,29 @@ int postcopy_wake_shared(struct PostCopyFD *pcfd, return ret; } +static int postcopy_request_page(MigrationIncomingState *mis, RAMBlock *rb, + ram_addr_t start, uint64_t haddr) +{ + void *aligned = (void *)(uintptr_t)ROUND_DOWN(haddr, qemu_ram_pagesize(rb)); + + /* + * Discarded pages (via RamDiscardManager) are never migrated. On unlikely + * access, place a zeropage, which will also set the relevant bits in the + * recv_bitmap accordingly, so we won't try placing a zeropage twice. + * + * Checking a single bit is sufficient to handle pagesize > TPS as either + * all relevant bits are set or not. + */ + assert(QEMU_IS_ALIGNED(start, qemu_ram_pagesize(rb))); + if (ramblock_page_is_discarded(rb, start)) { + bool received = ramblock_recv_bitmap_test_byte_offset(rb, start); + + return received ? 
0 : postcopy_place_page_zero(mis, aligned, rb); + } + + return migrate_send_rp_req_pages(mis, rb, start, haddr); +} + /* * Callback from shared fault handlers to ask for a page, * the page must be specified by a RAMBlock and an offset in that rb @@ -679,8 +702,7 @@ int postcopy_wake_shared(struct PostCopyFD *pcfd, int postcopy_request_shared_page(struct PostCopyFD *pcfd, RAMBlock *rb, uint64_t client_addr, uint64_t rb_offset) { - size_t pagesize = qemu_ram_pagesize(rb); - uint64_t aligned_rbo = rb_offset & ~(pagesize - 1); + uint64_t aligned_rbo = ROUND_DOWN(rb_offset, qemu_ram_pagesize(rb)); MigrationIncomingState *mis = migration_incoming_get_current(); trace_postcopy_request_shared_page(pcfd->idstr, qemu_ram_get_idstr(rb), @@ -690,7 +712,7 @@ int postcopy_request_shared_page(struct PostCopyFD *pcfd, RAMBlock *rb, qemu_ram_get_idstr(rb), rb_offset); return postcopy_wake_shared(pcfd, client_addr, rb); } - migrate_send_rp_req_pages(mis, rb, aligned_rbo, client_addr); + postcopy_request_page(mis, rb, aligned_rbo, client_addr); return 0; } @@ -970,7 +992,7 @@ static void *postcopy_ram_fault_thread(void *opaque) break; } - rb_offset &= ~(qemu_ram_pagesize(rb) - 1); + rb_offset = ROUND_DOWN(rb_offset, qemu_ram_pagesize(rb)); trace_postcopy_ram_fault_thread_request(msg.arg.pagefault.address, qemu_ram_get_idstr(rb), rb_offset, @@ -984,8 +1006,8 @@ retry: * Send the request to the source - we want to request one * of our host page sizes (which is >= TPS) */ - ret = migrate_send_rp_req_pages(mis, rb, rb_offset, - msg.arg.pagefault.address); + ret = postcopy_request_page(mis, rb, rb_offset, + msg.arg.pagefault.address); if (ret) { /* May be network failure, try to wait for recovery */ if (ret == -EIO && postcopy_pause_fault_thread(mis)) { @@ -993,7 +1015,7 @@ retry: goto retry; } else { /* This is a unavoidable fault */ - error_report("%s: migrate_send_rp_req_pages() get %d", + error_report("%s: postcopy_request_page() get %d", __func__, ret); break; } @@ -1435,6 +1457,10 @@ void postcopy_unregister_shared_ufd(struct PostCopyFD *pcfd) MigrationIncomingState *mis = migration_incoming_get_current(); GArray *pcrfds = mis->postcopy_remote_fds; + if (!pcrfds) { + /* migration has already finished and freed the array */ + return; + } for (i = 0; i < pcrfds->len; i++) { struct PostCopyFD *cur = &g_array_index(pcrfds, struct PostCopyFD, i); if (cur->fd == pcfd->fd) { diff --git a/migration/ram.c b/migration/ram.c index 7a43bfd7af..863035d235 100644 --- a/migration/ram.c +++ b/migration/ram.c @@ -56,6 +56,8 @@ #include "multifd.h" #include "sysemu/runstate.h" +#include "hw/boards.h" /* for machine_dump_guest_core() */ + #if defined(__linux__) #include "qemu/userfaultfd.h" #endif /* defined(__linux__) */ @@ -639,6 +641,15 @@ static void mig_throttle_guest_down(uint64_t bytes_dirty_period, } } +void mig_throttle_counter_reset(void) +{ + RAMState *rs = ram_state; + + rs->time_last_bitmap_sync = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); + rs->num_dirty_pages_period = 0; + rs->bytes_xfer_prev = ram_counters.transferred; +} + /** * xbzrle_cache_zero_page: insert a zero page in the XBZRLE cache * @@ -789,8 +800,7 @@ unsigned long migration_bitmap_find_dirty(RAMState *rs, RAMBlock *rb, return find_next_bit(bitmap, size, start); } -static void migration_clear_memory_region_dirty_bitmap(RAMState *rs, - RAMBlock *rb, +static void migration_clear_memory_region_dirty_bitmap(RAMBlock *rb, unsigned long page) { uint8_t shift; @@ -812,14 +822,13 @@ static void migration_clear_memory_region_dirty_bitmap(RAMState *rs, 
assert(shift >= 6); size = 1ULL << (TARGET_PAGE_BITS + shift); - start = (((ram_addr_t)page) << TARGET_PAGE_BITS) & (-size); + start = QEMU_ALIGN_DOWN((ram_addr_t)page << TARGET_PAGE_BITS, size); trace_migration_bitmap_clear_dirty(rb->idstr, start, size, page); memory_region_clear_dirty_bitmap(rb->mr, start, size); } static void -migration_clear_memory_region_dirty_bitmap_range(RAMState *rs, - RAMBlock *rb, +migration_clear_memory_region_dirty_bitmap_range(RAMBlock *rb, unsigned long start, unsigned long npages) { @@ -832,10 +841,45 @@ migration_clear_memory_region_dirty_bitmap_range(RAMState *rs, * exclusive. */ for (i = chunk_start; i < chunk_end; i += chunk_pages) { - migration_clear_memory_region_dirty_bitmap(rs, rb, i); + migration_clear_memory_region_dirty_bitmap(rb, i); } } +/* + * colo_bitmap_find_dirty: find contiguous dirty pages from start + * + * Returns the page offset within memory region of the start of the contiguous + * dirty pages + * + * @rs: current RAM state + * @rb: RAMBlock where to search for dirty pages + * @start: page where we start the search + * @num: the number of contiguous dirty pages + */ +static inline +unsigned long colo_bitmap_find_dirty(RAMState *rs, RAMBlock *rb, + unsigned long start, unsigned long *num) +{ + unsigned long size = rb->used_length >> TARGET_PAGE_BITS; + unsigned long *bitmap = rb->bmap; + unsigned long first, next; + + *num = 0; + + if (ramblock_is_ignored(rb)) { + return size; + } + + first = find_next_bit(bitmap, size, start); + if (first >= size) { + return first; + } + next = find_next_zero_bit(bitmap, size, first + 1); + assert(next >= first); + *num = next - first; + return first; +} + static inline bool migration_bitmap_clear_dirty(RAMState *rs, RAMBlock *rb, unsigned long page) @@ -850,7 +894,7 @@ static inline bool migration_bitmap_clear_dirty(RAMState *rs, * the page in the chunk we clear the remote dirty bitmap for all. * Clearing it earlier won't be a problem, but too late will. */ - migration_clear_memory_region_dirty_bitmap(rs, rb, page); + migration_clear_memory_region_dirty_bitmap(rb, page); ret = test_and_clear_bit(page, rb->bmap); if (ret) { @@ -860,6 +904,81 @@ static inline bool migration_bitmap_clear_dirty(RAMState *rs, return ret; } +static void dirty_bitmap_clear_section(MemoryRegionSection *section, + void *opaque) +{ + const hwaddr offset = section->offset_within_region; + const hwaddr size = int128_get64(section->size); + const unsigned long start = offset >> TARGET_PAGE_BITS; + const unsigned long npages = size >> TARGET_PAGE_BITS; + RAMBlock *rb = section->mr->ram_block; + uint64_t *cleared_bits = opaque; + + /* + * We don't grab ram_state->bitmap_mutex because we expect to run + * only when starting migration or during postcopy recovery where + * we don't have concurrent access. + */ + if (!migration_in_postcopy() && !migrate_background_snapshot()) { + migration_clear_memory_region_dirty_bitmap_range(rb, start, npages); + } + *cleared_bits += bitmap_count_one_with_offset(rb->bmap, start, npages); + bitmap_clear(rb->bmap, start, npages); +} + +/* + * Exclude all dirty pages from migration that fall into a discarded range as + * managed by a RamDiscardManager responsible for the mapped memory region of + * the RAMBlock. Clear the corresponding bits in the dirty bitmaps. + * + * Discarded pages ("logically unplugged") have undefined content and must + * not get migrated, because even reading these pages for migration might + * result in undesired behavior.
+ * + * Returns the number of cleared bits in the RAMBlock dirty bitmap. + * + * Note: The result is only stable while migrating (precopy/postcopy). + */ +static uint64_t ramblock_dirty_bitmap_clear_discarded_pages(RAMBlock *rb) +{ + uint64_t cleared_bits = 0; + + if (rb->mr && rb->bmap && memory_region_has_ram_discard_manager(rb->mr)) { + RamDiscardManager *rdm = memory_region_get_ram_discard_manager(rb->mr); + MemoryRegionSection section = { + .mr = rb->mr, + .offset_within_region = 0, + .size = int128_make64(qemu_ram_get_used_length(rb)), + }; + + ram_discard_manager_replay_discarded(rdm, &section, + dirty_bitmap_clear_section, + &cleared_bits); + } + return cleared_bits; +} + +/* + * Check if a host-page aligned page falls into a discarded range as managed by + * a RamDiscardManager responsible for the mapped memory region of the RAMBlock. + * + * Note: The result is only stable while migrating (precopy/postcopy). + */ +bool ramblock_page_is_discarded(RAMBlock *rb, ram_addr_t start) +{ + if (rb->mr && memory_region_has_ram_discard_manager(rb->mr)) { + RamDiscardManager *rdm = memory_region_get_ram_discard_manager(rb->mr); + MemoryRegionSection section = { + .mr = rb->mr, + .offset_within_region = start, + .size = int128_make64(qemu_ram_pagesize(rb)), + }; + + return !ram_discard_manager_is_populated(rdm, &section); + } + return false; +} + /* Called with RCU critical section */ static void ramblock_sync_dirty_bitmap(RAMState *rs, RAMBlock *rb) { @@ -1566,25 +1685,68 @@ out: return ret; } +static inline void populate_read_range(RAMBlock *block, ram_addr_t offset, + ram_addr_t size) +{ + /* + * We read one byte of each page; this will preallocate page tables if + * required and populate the shared zeropage on MAP_PRIVATE anonymous memory + * where no page was populated yet. This might require adaptation when + * supporting other mappings, like shmem. + */ + for (; offset < size; offset += block->page_size) { + char tmp = *((char *)block->host + offset); + + /* Don't optimize the read out */ + asm volatile("" : "+r" (tmp)); + } +} + +static inline int populate_read_section(MemoryRegionSection *section, + void *opaque) +{ + const hwaddr size = int128_get64(section->size); + hwaddr offset = section->offset_within_region; + RAMBlock *block = section->mr->ram_block; + + populate_read_range(block, offset, size); + return 0; +} + /* - * ram_block_populate_pages: populate memory in the RAM block by reading - * an integer from the beginning of each page. + * ram_block_populate_read: preallocate page tables and populate pages in the + * RAM block by reading a byte of each page. * * Since it's solely used for userfault_fd WP feature, here we just * hardcode page size to qemu_real_host_page_size. * * @block: RAM block to populate */ -static void ram_block_populate_pages(RAMBlock *block) +static void ram_block_populate_read(RAMBlock *rb) { - char *ptr = (char *) block->host; + /* + * Skip populating all pages that fall into a discarded range as managed by + * a RamDiscardManager responsible for the mapped memory region of the + * RAMBlock. Such discarded ("logically unplugged") parts of a RAMBlock + * must not get populated automatically. We don't have to track + * modifications via userfaultfd WP reliably, because these pages will + * not be part of the migration stream either way -- see + * ramblock_dirty_bitmap_clear_discarded_pages(). + * + * Note: The result is only stable while migrating (precopy/postcopy).
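The populate-by-reading approach documented above can be tried outside QEMU as well. Below is a minimal sketch against an anonymous private mapping; populate_by_reading is a name invented for this note and the 64-page mapping length is arbitrary:

#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

/* Touch one byte per page so the kernel populates page tables up front,
 * mirroring the populate_read_range() idea above.  The empty asm keeps
 * the compiler from optimising the read away. */
static void populate_by_reading(char *base, size_t size, size_t page_size)
{
    for (size_t off = 0; off < size; off += page_size) {
        char tmp = base[off];
        asm volatile("" : "+r" (tmp));
    }
}

int main(void)
{
    size_t page = (size_t)sysconf(_SC_PAGESIZE);
    size_t len = 64 * page;
    char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

    if (buf == MAP_FAILED) {
        perror("mmap");
        return 1;
    }
    populate_by_reading(buf, len, page);
    munmap(buf, len);
    return 0;
}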
+ */ + if (rb->mr && memory_region_has_ram_discard_manager(rb->mr)) { + RamDiscardManager *rdm = memory_region_get_ram_discard_manager(rb->mr); + MemoryRegionSection section = { + .mr = rb->mr, + .offset_within_region = 0, + .size = rb->mr->size, + }; - for (ram_addr_t offset = 0; offset < block->used_length; - offset += qemu_real_host_page_size) { - char tmp = *(ptr + offset); - - /* Don't optimize the read out */ - asm volatile("" : "+r" (tmp)); + ram_discard_manager_replay_populated(rdm, &section, + populate_read_section, NULL); + } else { + populate_read_range(rb, 0, rb->used_length); } } @@ -1611,7 +1773,7 @@ void ram_write_tracking_prepare(void) * UFFDIO_WRITEPROTECT_MODE_WP mode setting would silently skip * pages with pte_none() entries in page table. */ - ram_block_populate_pages(block); + ram_block_populate_read(block); } } @@ -2218,7 +2380,14 @@ static void ram_save_cleanup(void *opaque) /* caller have hold iothread lock or is in a bh, so there is * no writing race against the migration bitmap */ - memory_global_dirty_log_stop(); + if (global_dirty_tracking & GLOBAL_DIRTY_MIGRATION) { + /* + * do not stop dirty log without starting it, since + * memory_global_dirty_log_stop will assert that + * memory_global_dirty_log_start/stop used in pairs + */ + memory_global_dirty_log_stop(GLOBAL_DIRTY_MIGRATION); + } } RAMBLOCK_FOREACH_NOT_IGNORED(block) { @@ -2670,6 +2839,19 @@ static void ram_list_init_bitmaps(void) } } +static void migration_bitmap_clear_discarded_pages(RAMState *rs) +{ + unsigned long pages; + RAMBlock *rb; + + RCU_READ_LOCK_GUARD(); + + RAMBLOCK_FOREACH_NOT_IGNORED(rb) { + pages = ramblock_dirty_bitmap_clear_discarded_pages(rb); + rs->migration_dirty_pages -= pages; + } +} + static void ram_init_bitmaps(RAMState *rs) { /* For memory_global_dirty_log_start below. */ @@ -2680,12 +2862,18 @@ static void ram_init_bitmaps(RAMState *rs) ram_list_init_bitmaps(); /* We don't use dirty log with background snapshots */ if (!migrate_background_snapshot()) { - memory_global_dirty_log_start(); + memory_global_dirty_log_start(GLOBAL_DIRTY_MIGRATION); migration_bitmap_sync_precopy(rs); } } qemu_mutex_unlock_ramlist(); qemu_mutex_unlock_iothread(); + + /* + * After an eventual first bitmap sync, fixup the initial bitmap + * containing all 1s to exclude any discarded pages from migration. + */ + migration_bitmap_clear_discarded_pages(rs); } static int ram_init_all(RAMState **rsp) { @@ -2777,8 +2965,7 @@ void qemu_guest_free_page_hint(void *addr, size_t len) * are initially set. Otherwise those skipped pages will be sent in * the next round after syncing from the memory region bitmap.
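The hunk that follows keeps migration_dirty_pages consistent by counting the set bits in the hinted range before clearing them. A toy version of that count-then-clear bookkeeping; the bit helpers and drop_hinted_range are simplified stand-ins invented for this note, not QEMU's bitmap API:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define BITS_PER_WORD 64UL

static int test_bit(const uint64_t *map, unsigned long bit)
{
    return (map[bit / BITS_PER_WORD] >> (bit % BITS_PER_WORD)) & 1;
}

static void set_bit(uint64_t *map, unsigned long bit)
{
    map[bit / BITS_PER_WORD] |= 1ULL << (bit % BITS_PER_WORD);
}

static void clear_bit(uint64_t *map, unsigned long bit)
{
    map[bit / BITS_PER_WORD] &= ~(1ULL << (bit % BITS_PER_WORD));
}

/* Drop [start, start + npages) from the dirty set while keeping the
 * running dirty-page counter in sync, as the hunk does with
 * bitmap_count_one_with_offset() followed by bitmap_clear(). */
static void drop_hinted_range(uint64_t *bmap, unsigned long start,
                              unsigned long npages,
                              unsigned long *dirty_pages)
{
    for (unsigned long i = start; i < start + npages; i++) {
        if (test_bit(bmap, i)) {
            (*dirty_pages)--;
            clear_bit(bmap, i);
        }
    }
}

int main(void)
{
    uint64_t bmap[4] = { 0 };
    unsigned long dirty = 0;

    for (unsigned long i = 10; i < 30; i++) {   /* 20 dirty pages */
        set_bit(bmap, i);
        dirty++;
    }
    drop_hinted_range(bmap, 16, 8, &dirty);     /* hint covers 8 of them */
    assert(dirty == 12);
    printf("dirty pages left: %lu\n", dirty);
    return 0;
}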
*/ - migration_clear_memory_region_dirty_bitmap_range(ram_state, block, - start, npages); + migration_clear_memory_region_dirty_bitmap_range(block, start, npages); ram_state->migration_dirty_pages -= bitmap_count_one_with_offset(block->bmap, start, npages); bitmap_clear(block->bmap, start, npages); @@ -3401,6 +3588,10 @@ int colo_init_ram_cache(void) } return -errno; } + if (!machine_dump_guest_core(current_machine)) { + qemu_madvise(block->colo_cache, block->used_length, + QEMU_MADV_DONTDUMP); + } } } @@ -3437,7 +3628,7 @@ void colo_incoming_start_dirty_log(void) /* Discard this dirty bitmap record */ bitmap_zero(block->bmap, block->max_length >> TARGET_PAGE_BITS); } - memory_global_dirty_log_start(); + memory_global_dirty_log_start(GLOBAL_DIRTY_MIGRATION); } ram_state->migration_dirty_pages = 0; qemu_mutex_unlock_ramlist(); @@ -3449,7 +3640,7 @@ void colo_release_ram_cache(void) { RAMBlock *block; - memory_global_dirty_log_stop(); + memory_global_dirty_log_stop(GLOBAL_DIRTY_MIGRATION); RAMBLOCK_FOREACH_NOT_IGNORED(block) { g_free(block->bmap); block->bmap = NULL; @@ -3739,19 +3930,26 @@ void colo_flush_ram_cache(void) block = QLIST_FIRST_RCU(&ram_list.blocks); while (block) { - offset = migration_bitmap_find_dirty(ram_state, block, offset); + unsigned long num = 0; + offset = colo_bitmap_find_dirty(ram_state, block, offset, &num); if (!offset_in_ramblock(block, ((ram_addr_t)offset) << TARGET_PAGE_BITS)) { offset = 0; + num = 0; block = QLIST_NEXT_RCU(block, next); } else { - migration_bitmap_clear_dirty(ram_state, block, offset); + unsigned long i = 0; + + for (i = 0; i < num; i++) { + migration_bitmap_clear_dirty(ram_state, block, offset + i); + } dst_host = block->host + (((ram_addr_t)offset) << TARGET_PAGE_BITS); src_host = block->colo_cache + (((ram_addr_t)offset) << TARGET_PAGE_BITS); - memcpy(dst_host, src_host, TARGET_PAGE_SIZE); + memcpy(dst_host, src_host, TARGET_PAGE_SIZE * num); + offset += num; } } } @@ -4115,6 +4313,10 @@ int ram_dirty_bitmap_reload(MigrationState *s, RAMBlock *block) */ bitmap_complement(block->bmap, block->bmap, nbits); + /* Clear dirty bits of discarded ranges that we don't want to migrate. */ + ramblock_dirty_bitmap_clear_discarded_pages(block); + + /* We'll recalculate migration_dirty_pages in ram_state_resume_prepare(). */ trace_ram_dirty_bitmap_reload_complete(block->idstr); /* @@ -4178,9 +4380,8 @@ static void ram_mig_ram_block_resized(RAMBlockNotifier *n, void *host, * Abort and indicate a proper reason. 
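The colo_flush_ram_cache() change above switches from copying one page per iteration to copying a whole run of contiguous dirty pages with a single memcpy. A self-contained toy of that find-a-run-then-copy pattern; find_dirty_run, the byte-per-page dirty array, and the sizes are simplifications made up for this note (QEMU scans a packed bitmap with find_next_bit()/find_next_zero_bit()):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096UL
#define NPAGES    64UL

/* Return the index of the first dirty page at or after 'start' and the
 * length of the contiguous dirty run in '*num' (0 if none is left). */
static unsigned long find_dirty_run(const uint8_t *dirty, unsigned long start,
                                    unsigned long *num)
{
    unsigned long first = start, next;

    *num = 0;
    while (first < NPAGES && !dirty[first]) {
        first++;
    }
    if (first >= NPAGES) {
        return first;
    }
    next = first;
    while (next < NPAGES && dirty[next]) {
        next++;
    }
    *num = next - first;
    return first;
}

int main(void)
{
    static uint8_t src[NPAGES * PAGE_SIZE], dst[NPAGES * PAGE_SIZE];
    uint8_t dirty[NPAGES] = { 0 };
    unsigned long offset = 0, num;

    memset(src, 0xab, sizeof(src));
    for (unsigned long i = 8; i < 20; i++) {
        dirty[i] = 1;                        /* one contiguous dirty run */
    }

    while (offset < NPAGES) {
        offset = find_dirty_run(dirty, offset, &num);
        if (num == 0) {
            break;
        }
        /* copy the whole run at once, then clear its dirty marks */
        memcpy(dst + offset * PAGE_SIZE, src + offset * PAGE_SIZE,
               num * PAGE_SIZE);
        memset(dirty + offset, 0, num);
        offset += num;
    }
    assert(dst[10 * PAGE_SIZE] == 0xab);
    printf("copied dirty runs\n");
    return 0;
}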
*/ error_setg(&err, "RAM block '%s' resized during precopy.", rb->idstr); - migrate_set_error(migrate_get_current(), err); + migration_cancel(err); error_free(err); - migration_cancel(); } switch (ps) { diff --git a/migration/ram.h b/migration/ram.h index 4833e9fd5b..c515396a9a 100644 --- a/migration/ram.h +++ b/migration/ram.h @@ -50,6 +50,7 @@ bool ramblock_is_ignored(RAMBlock *block); int xbzrle_cache_resize(uint64_t new_size, Error **errp); uint64_t ram_bytes_remaining(void); uint64_t ram_bytes_total(void); +void mig_throttle_counter_reset(void); uint64_t ram_pagesize_summary(void); int ram_save_queue_pages(const char *rbname, ram_addr_t start, ram_addr_t len); @@ -72,6 +73,7 @@ void ramblock_recv_bitmap_set_range(RAMBlock *rb, void *host_addr, size_t nr); int64_t ramblock_recv_bitmap_send(QEMUFile *file, const char *block_name); int ram_dirty_bitmap_reload(MigrationState *s, RAMBlock *rb); +bool ramblock_page_is_discarded(RAMBlock *rb, ram_addr_t start); /* ram cache */ int colo_init_ram_cache(void); diff --git a/migration/rdma.c b/migration/rdma.c index 5c2d113aa9..f5d3bbe7e9 100644 --- a/migration/rdma.c +++ b/migration/rdma.c @@ -358,9 +358,11 @@ typedef struct RDMAContext { struct ibv_context *verbs; struct rdma_event_channel *channel; struct ibv_qp *qp; /* queue pair */ - struct ibv_comp_channel *comp_channel; /* completion channel */ + struct ibv_comp_channel *recv_comp_channel; /* recv completion channel */ + struct ibv_comp_channel *send_comp_channel; /* send completion channel */ struct ibv_pd *pd; /* protection domain */ - struct ibv_cq *cq; /* completion queue */ + struct ibv_cq *recv_cq; /* receive completion queue */ + struct ibv_cq *send_cq; /* send completion queue */ /* * If a previous write failed (perhaps because of a failed @@ -1059,21 +1061,34 @@ static int qemu_rdma_alloc_pd_cq(RDMAContext *rdma) return -1; } - /* create completion channel */ - rdma->comp_channel = ibv_create_comp_channel(rdma->verbs); - if (!rdma->comp_channel) { - error_report("failed to allocate completion channel"); + /* create receive completion channel */ + rdma->recv_comp_channel = ibv_create_comp_channel(rdma->verbs); + if (!rdma->recv_comp_channel) { + error_report("failed to allocate receive completion channel"); goto err_alloc_pd_cq; } /* - * Completion queue can be filled by both read and write work requests, - * so must reflect the sum of both possible queue sizes. + * Completion queue can be filled by read work requests.
*/ - rdma->cq = ibv_create_cq(rdma->verbs, (RDMA_SIGNALED_SEND_MAX * 3), - NULL, rdma->comp_channel, 0); - if (!rdma->cq) { - error_report("failed to allocate completion queue"); + rdma->recv_cq = ibv_create_cq(rdma->verbs, (RDMA_SIGNALED_SEND_MAX * 3), + NULL, rdma->recv_comp_channel, 0); + if (!rdma->recv_cq) { + error_report("failed to allocate receive completion queue"); + goto err_alloc_pd_cq; + } + + /* create send completion channel */ + rdma->send_comp_channel = ibv_create_comp_channel(rdma->verbs); + if (!rdma->send_comp_channel) { + error_report("failed to allocate send completion channel"); + goto err_alloc_pd_cq; + } + + rdma->send_cq = ibv_create_cq(rdma->verbs, (RDMA_SIGNALED_SEND_MAX * 3), + NULL, rdma->send_comp_channel, 0); + if (!rdma->send_cq) { + error_report("failed to allocate send completion queue"); goto err_alloc_pd_cq; } @@ -1083,11 +1098,19 @@ err_alloc_pd_cq: if (rdma->pd) { ibv_dealloc_pd(rdma->pd); } - if (rdma->comp_channel) { - ibv_destroy_comp_channel(rdma->comp_channel); + if (rdma->recv_comp_channel) { + ibv_destroy_comp_channel(rdma->recv_comp_channel); + } + if (rdma->send_comp_channel) { + ibv_destroy_comp_channel(rdma->send_comp_channel); + } + if (rdma->recv_cq) { + ibv_destroy_cq(rdma->recv_cq); + rdma->recv_cq = NULL; } rdma->pd = NULL; - rdma->comp_channel = NULL; + rdma->recv_comp_channel = NULL; + rdma->send_comp_channel = NULL; return -1; } @@ -1104,8 +1127,8 @@ static int qemu_rdma_alloc_qp(RDMAContext *rdma) attr.cap.max_recv_wr = 3; attr.cap.max_send_sge = 1; attr.cap.max_recv_sge = 1; - attr.send_cq = rdma->cq; - attr.recv_cq = rdma->cq; + attr.send_cq = rdma->send_cq; + attr.recv_cq = rdma->recv_cq; attr.qp_type = IBV_QPT_RC; ret = rdma_create_qp(rdma->cm_id, rdma->pd, &attr); @@ -1117,19 +1140,82 @@ static int qemu_rdma_alloc_qp(RDMAContext *rdma) return 0; } +/* Check whether On-Demand Paging is supported by the RDMA device */ +static bool rdma_support_odp(struct ibv_context *dev) +{ + struct ibv_device_attr_ex attr = {0}; + int ret = ibv_query_device_ex(dev, NULL, &attr); + if (ret) { + return false; + } + + if (attr.odp_caps.general_caps & IBV_ODP_SUPPORT) { + return true; + } + + return false; +} + +/* + * ibv_advise_mr to avoid RNR NAK error as far as possible. + * The responder mr registering with ODP will send RNR NAK back to + * the requester in the face of the page fault. + */ +static void qemu_rdma_advise_prefetch_mr(struct ibv_pd *pd, uint64_t addr, + uint32_t len, uint32_t lkey, + const char *name, bool wr) +{ +#ifdef HAVE_IBV_ADVISE_MR + int ret; + int advice = wr ?
IBV_ADVISE_MR_ADVICE_PREFETCH_WRITE : + IBV_ADVISE_MR_ADVICE_PREFETCH; + struct ibv_sge sg_list = {.lkey = lkey, .addr = addr, .length = len}; + + ret = ibv_advise_mr(pd, advice, + IBV_ADVISE_MR_FLAG_FLUSH, &sg_list, 1); + /* ignore the error */ + if (ret) { + trace_qemu_rdma_advise_mr(name, len, addr, strerror(errno)); + } else { + trace_qemu_rdma_advise_mr(name, len, addr, "successed"); + } +#endif +} + static int qemu_rdma_reg_whole_ram_blocks(RDMAContext *rdma) { int i; RDMALocalBlocks *local = &rdma->local_ram_blocks; for (i = 0; i < local->nb_blocks; i++) { + int access = IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_REMOTE_WRITE; + local->block[i].mr = ibv_reg_mr(rdma->pd, local->block[i].local_host_addr, - local->block[i].length, - IBV_ACCESS_LOCAL_WRITE | - IBV_ACCESS_REMOTE_WRITE + local->block[i].length, access ); + + if (!local->block[i].mr && + errno == ENOTSUP && rdma_support_odp(rdma->verbs)) { + access |= IBV_ACCESS_ON_DEMAND; + /* register ODP mr */ + local->block[i].mr = + ibv_reg_mr(rdma->pd, + local->block[i].local_host_addr, + local->block[i].length, access); + trace_qemu_rdma_register_odp_mr(local->block[i].block_name); + + if (local->block[i].mr) { + qemu_rdma_advise_prefetch_mr(rdma->pd, + (uintptr_t)local->block[i].local_host_addr, + local->block[i].length, + local->block[i].mr->lkey, + local->block[i].block_name, + true); + } + } + if (!local->block[i].mr) { perror("Failed to register local dest ram block!"); break; @@ -1215,28 +1301,40 @@ static int qemu_rdma_register_and_get_keys(RDMAContext *rdma, */ if (!block->pmr[chunk]) { uint64_t len = chunk_end - chunk_start; + int access = rkey ? IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_REMOTE_WRITE : + 0; trace_qemu_rdma_register_and_get_keys(len, chunk_start); - block->pmr[chunk] = ibv_reg_mr(rdma->pd, - chunk_start, len, - (rkey ? (IBV_ACCESS_LOCAL_WRITE | - IBV_ACCESS_REMOTE_WRITE) : 0)); + block->pmr[chunk] = ibv_reg_mr(rdma->pd, chunk_start, len, access); + if (!block->pmr[chunk] && + errno == ENOTSUP && rdma_support_odp(rdma->verbs)) { + access |= IBV_ACCESS_ON_DEMAND; + /* register ODP mr */ + block->pmr[chunk] = ibv_reg_mr(rdma->pd, chunk_start, len, access); + trace_qemu_rdma_register_odp_mr(block->block_name); - if (!block->pmr[chunk]) { - perror("Failed to register chunk!"); - fprintf(stderr, "Chunk details: block: %d chunk index %d" - " start %" PRIuPTR " end %" PRIuPTR - " host %" PRIuPTR - " local %" PRIuPTR " registrations: %d\n", - block->index, chunk, (uintptr_t)chunk_start, - (uintptr_t)chunk_end, host_addr, - (uintptr_t)block->local_host_addr, - rdma->total_registrations); - return -1; + if (block->pmr[chunk]) { + qemu_rdma_advise_prefetch_mr(rdma->pd, (uintptr_t)chunk_start, + len, block->pmr[chunk]->lkey, + block->block_name, rkey); + + } } - rdma->total_registrations++; } + if (!block->pmr[chunk]) { + perror("Failed to register chunk!"); + fprintf(stderr, "Chunk details: block: %d chunk index %d" + " start %" PRIuPTR " end %" PRIuPTR + " host %" PRIuPTR + " local %" PRIuPTR " registrations: %d\n", + block->index, chunk, (uintptr_t)chunk_start, + (uintptr_t)chunk_end, host_addr, + (uintptr_t)block->local_host_addr, + rdma->total_registrations); + return -1; + } + rdma->total_registrations++; if (lkey) { *lkey = block->pmr[chunk]->lkey; @@ -1421,14 +1519,14 @@ static void qemu_rdma_signal_unregister(RDMAContext *rdma, uint64_t index, * (of any kind) has completed. * Return the work request ID that completed. 
*/ -static uint64_t qemu_rdma_poll(RDMAContext *rdma, uint64_t *wr_id_out, - uint32_t *byte_len) +static uint64_t qemu_rdma_poll(RDMAContext *rdma, struct ibv_cq *cq, + uint64_t *wr_id_out, uint32_t *byte_len) { int ret; struct ibv_wc wc; uint64_t wr_id; - ret = ibv_poll_cq(rdma->cq, 1, &wc); + ret = ibv_poll_cq(cq, 1, &wc); if (!ret) { *wr_id_out = RDMA_WRID_NONE; @@ -1500,7 +1598,8 @@ static uint64_t qemu_rdma_poll(RDMAContext *rdma, uint64_t *wr_id_out, /* Wait for activity on the completion channel. * Returns 0 on success, none-0 on error. */ -static int qemu_rdma_wait_comp_channel(RDMAContext *rdma) +static int qemu_rdma_wait_comp_channel(RDMAContext *rdma, + struct ibv_comp_channel *comp_channel) { struct rdma_cm_event *cm_event; int ret = -1; @@ -1511,7 +1610,7 @@ static int qemu_rdma_wait_comp_channel(RDMAContext *rdma) */ if (rdma->migration_started_on_destination && migration_incoming_get_current()->state == MIGRATION_STATUS_ACTIVE) { - yield_until_fd_readable(rdma->comp_channel->fd); + yield_until_fd_readable(comp_channel->fd); } else { /* This is the source side, we're in a separate thread * or destination prior to migration_fd_process_incoming() @@ -1522,7 +1621,7 @@ static int qemu_rdma_wait_comp_channel(RDMAContext *rdma) */ while (!rdma->error_state && !rdma->received_error) { GPollFD pfds[2]; - pfds[0].fd = rdma->comp_channel->fd; + pfds[0].fd = comp_channel->fd; pfds[0].events = G_IO_IN | G_IO_HUP | G_IO_ERR; pfds[0].revents = 0; @@ -1580,6 +1679,17 @@ static int qemu_rdma_wait_comp_channel(RDMAContext *rdma) return rdma->error_state; } +static struct ibv_comp_channel *to_channel(RDMAContext *rdma, int wrid) +{ + return wrid < RDMA_WRID_RECV_CONTROL ? rdma->send_comp_channel : + rdma->recv_comp_channel; +} + +static struct ibv_cq *to_cq(RDMAContext *rdma, int wrid) +{ + return wrid < RDMA_WRID_RECV_CONTROL ? rdma->send_cq : rdma->recv_cq; +} + /* * Block until the next work request has completed. 
* @@ -1600,13 +1710,15 @@ static int qemu_rdma_block_for_wrid(RDMAContext *rdma, int wrid_requested, struct ibv_cq *cq; void *cq_ctx; uint64_t wr_id = RDMA_WRID_NONE, wr_id_in; + struct ibv_comp_channel *ch = to_channel(rdma, wrid_requested); + struct ibv_cq *poll_cq = to_cq(rdma, wrid_requested); - if (ibv_req_notify_cq(rdma->cq, 0)) { + if (ibv_req_notify_cq(poll_cq, 0)) { return -1; } /* poll cq first */ while (wr_id != wrid_requested) { - ret = qemu_rdma_poll(rdma, &wr_id_in, byte_len); + ret = qemu_rdma_poll(rdma, poll_cq, &wr_id_in, byte_len); if (ret < 0) { return ret; } @@ -1627,12 +1739,12 @@ static int qemu_rdma_block_for_wrid(RDMAContext *rdma, int wrid_requested, } while (1) { - ret = qemu_rdma_wait_comp_channel(rdma); + ret = qemu_rdma_wait_comp_channel(rdma, ch); if (ret) { goto err_block_for_wrid; } - ret = ibv_get_cq_event(rdma->comp_channel, &cq, &cq_ctx); + ret = ibv_get_cq_event(ch, &cq, &cq_ctx); if (ret) { perror("ibv_get_cq_event"); goto err_block_for_wrid; @@ -1646,7 +1758,7 @@ static int qemu_rdma_block_for_wrid(RDMAContext *rdma, int wrid_requested, } while (wr_id != wrid_requested) { - ret = qemu_rdma_poll(rdma, &wr_id_in, byte_len); + ret = qemu_rdma_poll(rdma, poll_cq, &wr_id_in, byte_len); if (ret < 0) { goto err_block_for_wrid; } @@ -2362,13 +2474,21 @@ static void qemu_rdma_cleanup(RDMAContext *rdma) rdma_destroy_qp(rdma->cm_id); rdma->qp = NULL; } - if (rdma->cq) { - ibv_destroy_cq(rdma->cq); - rdma->cq = NULL; + if (rdma->recv_cq) { + ibv_destroy_cq(rdma->recv_cq); + rdma->recv_cq = NULL; } - if (rdma->comp_channel) { - ibv_destroy_comp_channel(rdma->comp_channel); - rdma->comp_channel = NULL; + if (rdma->send_cq) { + ibv_destroy_cq(rdma->send_cq); + rdma->send_cq = NULL; + } + if (rdma->recv_comp_channel) { + ibv_destroy_comp_channel(rdma->recv_comp_channel); + rdma->recv_comp_channel = NULL; + } + if (rdma->send_comp_channel) { + ibv_destroy_comp_channel(rdma->send_comp_channel); + rdma->send_comp_channel = NULL; } if (rdma->pd) { ibv_dealloc_pd(rdma->pd); @@ -3040,10 +3160,14 @@ static void qio_channel_rdma_set_aio_fd_handler(QIOChannel *ioc, { QIOChannelRDMA *rioc = QIO_CHANNEL_RDMA(ioc); if (io_read) { - aio_set_fd_handler(ctx, rioc->rdmain->comp_channel->fd, + aio_set_fd_handler(ctx, rioc->rdmain->recv_comp_channel->fd, + false, io_read, io_write, NULL, opaque); + aio_set_fd_handler(ctx, rioc->rdmain->send_comp_channel->fd, false, io_read, io_write, NULL, opaque); } else { - aio_set_fd_handler(ctx, rioc->rdmaout->comp_channel->fd, + aio_set_fd_handler(ctx, rioc->rdmaout->recv_comp_channel->fd, + false, io_read, io_write, NULL, opaque); + aio_set_fd_handler(ctx, rioc->rdmaout->send_comp_channel->fd, false, io_read, io_write, NULL, opaque); } } @@ -3257,7 +3381,22 @@ static size_t qemu_rdma_save_page(QEMUFile *f, void *opaque, */ while (1) { uint64_t wr_id, wr_id_in; - int ret = qemu_rdma_poll(rdma, &wr_id_in, NULL); + int ret = qemu_rdma_poll(rdma, rdma->recv_cq, &wr_id_in, NULL); + if (ret < 0) { + error_report("rdma migration: polling error! %d", ret); + goto err; + } + + wr_id = wr_id_in & RDMA_WRID_TYPE_MASK; + + if (wr_id == RDMA_WRID_NONE) { + break; + } + } + + while (1) { + uint64_t wr_id, wr_id_in; + int ret = qemu_rdma_poll(rdma, rdma->send_cq, &wr_id_in, NULL); if (ret < 0) { error_report("rdma migration: polling error! 
%d", ret); goto err; diff --git a/migration/savevm.c b/migration/savevm.c index 7b7b64bd13..d59e976d50 100644 --- a/migration/savevm.c +++ b/migration/savevm.c @@ -1567,6 +1567,7 @@ static int qemu_savevm_state(QEMUFile *f, Error **errp) migrate_init(ms); memset(&ram_counters, 0, sizeof(ram_counters)); + memset(&compression_counters, 0, sizeof(compression_counters)); ms->to_dst_file = f; qemu_mutex_unlock_iothread(); diff --git a/migration/trace-events b/migration/trace-events index a1c0f034ab..b48d873b8a 100644 --- a/migration/trace-events +++ b/migration/trace-events @@ -212,6 +212,8 @@ qemu_rdma_poll_write(const char *compstr, int64_t comp, int left, uint64_t block qemu_rdma_poll_other(const char *compstr, int64_t comp, int left) "other completion %s (%" PRId64 ") received left %d" qemu_rdma_post_send_control(const char *desc) "CONTROL: sending %s.." qemu_rdma_register_and_get_keys(uint64_t len, void *start) "Registering %" PRIu64 " bytes @ %p" +qemu_rdma_register_odp_mr(const char *name) "Try to register On-Demand Paging memory region: %s" +qemu_rdma_advise_mr(const char *name, uint32_t len, uint64_t addr, const char *res) "Try to advise block %s prefetch at %" PRIu32 "@0x%" PRIx64 ": %s" qemu_rdma_registration_handle_compress(int64_t length, int index, int64_t offset) "Zapping zero chunk: %" PRId64 " bytes, index %d, offset %" PRId64 qemu_rdma_registration_handle_finished(void) "" qemu_rdma_registration_handle_ram_blocks(void) "" @@ -331,6 +333,8 @@ get_ramblock_vfn_hash(const char *idstr, uint64_t vfn, uint32_t crc) "ramblock n calc_page_dirty_rate(const char *idstr, uint32_t new_crc, uint32_t old_crc) "ramblock name: %s, new crc: %" PRIu32 ", old crc: %" PRIu32 skip_sample_ramblock(const char *idstr, uint64_t ramblock_size) "ramblock name: %s, ramblock size: %" PRIu64 find_page_matched(const char *idstr) "ramblock %s addr or size changed" +dirtyrate_calculate(int64_t dirtyrate) "dirty rate: %" PRIi64 " MB/s" +dirtyrate_do_calculate_vcpu(int idx, uint64_t rate) "vcpu[%d]: %"PRIu64 " MB/s" # block.c migration_block_init_shared(const char *blk_device_name) "Start migration for %s with shared base image" diff --git a/monitor/hmp-cmds.c b/monitor/hmp-cmds.c index b5e71d9e6f..9c91bf93e9 100644 --- a/monitor/hmp-cmds.c +++ b/monitor/hmp-cmds.c @@ -52,9 +52,7 @@ #include "ui/console.h" #include "qemu/cutils.h" #include "qemu/error-report.h" -#include "exec/ramlist.h" #include "hw/intc/intc.h" -#include "hw/rdma/rdma.h" #include "migration/snapshot.h" #include "migration/misc.h" @@ -62,11 +60,13 @@ #include #endif -void hmp_handle_error(Monitor *mon, Error *err) +bool hmp_handle_error(Monitor *mon, Error *err) { if (err) { error_reportf_err(err, "Error: "); + return true; } + return false; } /* @@ -577,8 +577,7 @@ void hmp_info_vnc(Monitor *mon, const QDict *qdict) info2l = qmp_query_vnc_servers(&err); info2l_head = info2l; - if (err) { - hmp_handle_error(mon, err); + if (hmp_handle_error(mon, err)) { return; } if (!info2l) { @@ -693,8 +692,7 @@ void hmp_info_balloon(Monitor *mon, const QDict *qdict) Error *err = NULL; info = qmp_query_balloon(&err); - if (err) { - hmp_handle_error(mon, err); + if (hmp_handle_error(mon, err)) { return; } @@ -786,44 +784,6 @@ static void hmp_info_pci_device(Monitor *mon, const PciDeviceInfo *dev) } } -static int hmp_info_irq_foreach(Object *obj, void *opaque) -{ - InterruptStatsProvider *intc; - InterruptStatsProviderClass *k; - Monitor *mon = opaque; - - if (object_dynamic_cast(obj, TYPE_INTERRUPT_STATS_PROVIDER)) { - intc = INTERRUPT_STATS_PROVIDER(obj); - 
k = INTERRUPT_STATS_PROVIDER_GET_CLASS(obj); - uint64_t *irq_counts; - unsigned int nb_irqs, i; - if (k->get_statistics && - k->get_statistics(intc, &irq_counts, &nb_irqs)) { - if (nb_irqs > 0) { - monitor_printf(mon, "IRQ statistics for %s:\n", - object_get_typename(obj)); - for (i = 0; i < nb_irqs; i++) { - if (irq_counts[i] > 0) { - monitor_printf(mon, "%2d: %" PRId64 "\n", i, - irq_counts[i]); - } - } - } - } else { - monitor_printf(mon, "IRQ statistics not available for %s.\n", - object_get_typename(obj)); - } - } - - return 0; -} - -void hmp_info_irq(Monitor *mon, const QDict *qdict) -{ - object_child_foreach_recursive(object_get_root(), - hmp_info_irq_foreach, mon); -} - static int hmp_info_pic_foreach(Object *obj, void *opaque) { InterruptStatsProvider *intc; @@ -850,32 +810,6 @@ void hmp_info_pic(Monitor *mon, const QDict *qdict) hmp_info_pic_foreach, mon); } -static int hmp_info_rdma_foreach(Object *obj, void *opaque) -{ - RdmaProvider *rdma; - RdmaProviderClass *k; - Monitor *mon = opaque; - - if (object_dynamic_cast(obj, INTERFACE_RDMA_PROVIDER)) { - rdma = RDMA_PROVIDER(obj); - k = RDMA_PROVIDER_GET_CLASS(obj); - if (k->print_statistics) { - k->print_statistics(mon, rdma); - } else { - monitor_printf(mon, "RDMA statistics not available for %s.\n", - object_get_typename(obj)); - } - } - - return 0; -} - -void hmp_info_rdma(Monitor *mon, const QDict *qdict) -{ - object_child_foreach_recursive(object_get_root(), - hmp_info_rdma_foreach, mon); -} - void hmp_info_pci(Monitor *mon, const QDict *qdict) { PciInfoList *info_list, *info; @@ -1065,8 +999,7 @@ void hmp_ringbuf_read(Monitor *mon, const QDict *qdict) int i; data = qmp_ringbuf_read(chardev, size, false, 0, &err); - if (err) { - hmp_handle_error(mon, err); + if (hmp_handle_error(mon, err)) { return; } @@ -1582,8 +1515,7 @@ void hmp_migrate(Monitor *mon, const QDict *qdict) qmp_migrate(uri, !!blk, blk, !!inc, inc, false, false, true, resume, &err); - if (err) { - hmp_handle_error(mon, err); + if (hmp_handle_error(mon, err)) { return; } @@ -1823,6 +1755,7 @@ void hmp_info_memory_devices(Monitor *mon, const QDict *qdict) VirtioMEMDeviceInfo *vmi; MemoryDeviceInfo *value; PCDIMMDeviceInfo *di; + SgxEPCDeviceInfo *se; for (info = info_list; info; info = info->next) { value = info->value; @@ -1870,6 +1803,15 @@ void hmp_info_memory_devices(Monitor *mon, const QDict *qdict) vmi->block_size); monitor_printf(mon, " memdev: %s\n", vmi->memdev); break; + case MEMORY_DEVICE_INFO_KIND_SGX_EPC: + se = value->u.sgx_epc.data; + monitor_printf(mon, "Memory device [%s]: \"%s\"\n", + MemoryDeviceInfoKind_str(value->type), + se->id ? 
se->id : ""); + monitor_printf(mon, " memaddr: 0x%" PRIx64 "\n", se->memaddr); + monitor_printf(mon, " size: %" PRIu64 "\n", se->size); + monitor_printf(mon, " memdev: %s\n", se->memdev); + break; default: g_assert_not_reached(); } @@ -1907,8 +1849,7 @@ void hmp_rocker(Monitor *mon, const QDict *qdict) Error *err = NULL; rocker = qmp_query_rocker(name, &err); - if (err != NULL) { - hmp_handle_error(mon, err); + if (hmp_handle_error(mon, err)) { return; } @@ -1926,8 +1867,7 @@ void hmp_rocker_ports(Monitor *mon, const QDict *qdict) Error *err = NULL; list = qmp_query_rocker_ports(name, &err); - if (err != NULL) { - hmp_handle_error(mon, err); + if (hmp_handle_error(mon, err)) { return; } @@ -1935,7 +1875,7 @@ void hmp_rocker_ports(Monitor *mon, const QDict *qdict) monitor_printf(mon, " port link duplex neg?\n"); for (port = list; port; port = port->next) { - monitor_printf(mon, "%10s %-4s %-3s %2s %-3s\n", + monitor_printf(mon, "%10s %-4s %-3s %2s %s\n", port->value->name, port->value->enabled ? port->value->link_up ? "up" : "down" : "!ena", @@ -1955,8 +1895,7 @@ void hmp_rocker_of_dpa_flows(Monitor *mon, const QDict *qdict) Error *err = NULL; list = qmp_query_rocker_of_dpa_flows(name, tbl_id != -1, tbl_id, &err); - if (err != NULL) { - hmp_handle_error(mon, err); + if (hmp_handle_error(mon, err)) { return; } @@ -2105,8 +2044,7 @@ void hmp_rocker_of_dpa_groups(Monitor *mon, const QDict *qdict) Error *err = NULL; list = qmp_query_rocker_of_dpa_groups(name, type != 9, type, &err); - if (err != NULL) { - hmp_handle_error(mon, err); + if (hmp_handle_error(mon, err)) { return; } @@ -2199,11 +2137,6 @@ void hmp_rocker_of_dpa_groups(Monitor *mon, const QDict *qdict) qapi_free_RockerOfDpaGroupList(list); } -void hmp_info_ramblock(Monitor *mon, const QDict *qdict) -{ - ram_block_dump(mon); -} - void hmp_info_vm_generation_id(Monitor *mon, const QDict *qdict) { Error *err = NULL; diff --git a/monitor/hmp.c b/monitor/hmp.c index d50c3124e1..b20737e63c 100644 --- a/monitor/hmp.c +++ b/monitor/hmp.c @@ -26,6 +26,7 @@ #include #include "hw/qdev-core.h" #include "monitor-internal.h" +#include "monitor/hmp.h" #include "qapi/error.h" #include "qapi/qmp/qdict.h" #include "qapi/qmp/qnum.h" @@ -1061,6 +1062,31 @@ fail: return NULL; } +static void hmp_info_human_readable_text(Monitor *mon, + HumanReadableText *(*handler)(Error **)) +{ + Error *err = NULL; + g_autoptr(HumanReadableText) info = handler(&err); + + if (hmp_handle_error(mon, err)) { + return; + } + + monitor_printf(mon, "%s", info->human_readable_text); +} + +static void handle_hmp_command_exec(Monitor *mon, + const HMPCommand *cmd, + QDict *qdict) +{ + if (cmd->cmd_info_hrt) { + hmp_info_human_readable_text(mon, + cmd->cmd_info_hrt); + } else { + cmd->cmd(mon, qdict); + } +} + typedef struct HandleHmpCommandCo { Monitor *mon; const HMPCommand *cmd; @@ -1071,7 +1097,7 @@ typedef struct HandleHmpCommandCo { static void handle_hmp_command_co(void *opaque) { HandleHmpCommandCo *data = opaque; - data->cmd->cmd(data->mon, data->qdict); + handle_hmp_command_exec(data->mon, data->cmd, data->qdict); monitor_set_cur(qemu_coroutine_self(), NULL); data->done = true; } @@ -1089,7 +1115,7 @@ void handle_hmp_command(MonitorHMP *mon, const char *cmdline) return; } - if (!cmd->cmd) { + if (!cmd->cmd && !cmd->cmd_info_hrt) { /* FIXME: is it useful to try autoload modules here ??? 
*/ monitor_printf(&mon->common, "Command \"%.*s\" is not available.\n", (int)(cmdline - cmd_start), cmd_start); @@ -1109,7 +1135,7 @@ void handle_hmp_command(MonitorHMP *mon, const char *cmdline) if (!cmd->coroutine) { /* old_mon is non-NULL when called from qmp_human_monitor_command() */ Monitor *old_mon = monitor_set_cur(qemu_coroutine_self(), &mon->common); - cmd->cmd(&mon->common, qdict); + handle_hmp_command_exec(&mon->common, cmd, qdict); monitor_set_cur(qemu_coroutine_self(), old_mon); } else { HandleHmpCommandCo data = { diff --git a/monitor/misc.c b/monitor/misc.c index ffe7966870..a3a6e47844 100644 --- a/monitor/misc.c +++ b/monitor/misc.c @@ -70,7 +70,9 @@ #include "qapi/qapi-commands-migration.h" #include "qapi/qapi-commands-misc.h" #include "qapi/qapi-commands-qom.h" +#include "qapi/qapi-commands-run-state.h" #include "qapi/qapi-commands-trace.h" +#include "qapi/qapi-commands-machine.h" #include "qapi/qapi-init-commands.h" #include "qapi/error.h" #include "qapi/qmp-event.h" @@ -230,12 +232,13 @@ static void monitor_init_qmp_commands(void) qmp_init_marshal(&qmp_commands); - qmp_register_command(&qmp_commands, "device_add", qmp_device_add, - QCO_NO_OPTIONS); + qmp_register_command(&qmp_commands, "device_add", + qmp_device_add, 0, 0); QTAILQ_INIT(&qmp_cap_negotiation_commands); qmp_register_command(&qmp_cap_negotiation_commands, "qmp_capabilities", - qmp_marshal_qmp_capabilities, QCO_ALLOW_PRECONFIG); + qmp_marshal_qmp_capabilities, + QCO_ALLOW_PRECONFIG, 0); } /* Set the current CPU defined by the user. Callers must hold BQL. */ @@ -470,10 +473,18 @@ static void hmp_gdbserver(Monitor *mon, const QDict *qdict) static void hmp_watchdog_action(Monitor *mon, const QDict *qdict) { - const char *action = qdict_get_str(qdict, "action"); - if (select_watchdog_action(action) == -1) { - monitor_printf(mon, "Unknown watchdog action '%s'\n", action); + Error *err = NULL; + WatchdogAction action; + char *qapi_value; + + qapi_value = g_ascii_strdown(qdict_get_str(qdict, "action"), -1); + action = qapi_enum_parse(&WatchdogAction_lookup, qapi_value, -1, &err); + g_free(qapi_value); + if (err) { + hmp_handle_error(mon, err); + return; } + qmp_watchdog_set_action(action, &error_abort); } static void monitor_printc(Monitor *mon, int c) @@ -929,33 +940,6 @@ static void hmp_info_mtree(Monitor *mon, const QDict *qdict) mtree_info(flatview, dispatch_tree, owner, disabled); } -#ifdef CONFIG_PROFILER - -int64_t dev_time; - -static void hmp_info_profile(Monitor *mon, const QDict *qdict) -{ - static int64_t last_cpu_exec_time; - int64_t cpu_exec_time; - int64_t delta; - - cpu_exec_time = tcg_cpu_exec_time(); - delta = cpu_exec_time - last_cpu_exec_time; - - monitor_printf(mon, "async time %" PRId64 " (%0.3f)\n", - dev_time, dev_time / (double)NANOSECONDS_PER_SECOND); - monitor_printf(mon, "qemu time %" PRId64 " (%0.3f)\n", - delta, delta / (double)NANOSECONDS_PER_SECOND); - last_cpu_exec_time = cpu_exec_time; - dev_time = 0; -} -#else -static void hmp_info_profile(Monitor *mon, const QDict *qdict) -{ - monitor_printf(mon, "Internal profiler not compiled\n"); -} -#endif - /* Capture support */ static QLIST_HEAD (capture_list_head, CaptureState) capture_head; @@ -1963,7 +1947,7 @@ void monitor_register_hmp(const char *name, bool info, while (table->name != NULL) { if (strcmp(table->name, name) == 0) { - g_assert(table->cmd == NULL); + g_assert(table->cmd == NULL && table->cmd_info_hrt == NULL); table->cmd = cmd; return; } @@ -1972,6 +1956,22 @@ void monitor_register_hmp(const char *name, bool info, 
g_assert_not_reached(); } +void monitor_register_hmp_info_hrt(const char *name, + HumanReadableText *(*handler)(Error **errp)) +{ + HMPCommand *table = hmp_info_cmds; + + while (table->name != NULL) { + if (strcmp(table->name, name) == 0) { + g_assert(table->cmd == NULL && table->cmd_info_hrt == NULL); + table->cmd_info_hrt = handler; + return; + } + table++; + } + g_assert_not_reached(); +} + void monitor_init_globals(void) { monitor_init_globals_core(); diff --git a/monitor/monitor-internal.h b/monitor/monitor-internal.h index 9c3a09cb01..3da3f86c6a 100644 --- a/monitor/monitor-internal.h +++ b/monitor/monitor-internal.h @@ -74,6 +74,13 @@ typedef struct HMPCommand { const char *help; const char *flags; /* p=preconfig */ void (*cmd)(Monitor *mon, const QDict *qdict); + /* + * If implementing a command that takes no arguments and simply + * prints formatted data, then leave @cmd NULL, and then set + * @cmd_info_hrt to the corresponding QMP handler that returns + * the formatted text. + */ + HumanReadableText *(*cmd_info_hrt)(Error **errp); bool coroutine; /* * @sub_table is a list of 2nd level of commands. If it does not exist, diff --git a/monitor/monitor.c b/monitor/monitor.c index 46a171bca6..21c7a68758 100644 --- a/monitor/monitor.c +++ b/monitor/monitor.c @@ -474,6 +474,10 @@ static unsigned int qapi_event_throttle_hash(const void *key) hash += g_str_hash(qdict_get_str(evstate->data, "node-name")); } + if (evstate->event == QAPI_EVENT_MEMORY_DEVICE_SIZE_CHANGE) { + hash += g_str_hash(qdict_get_str(evstate->data, "qom-path")); + } + return hash; } @@ -496,6 +500,11 @@ static gboolean qapi_event_throttle_equal(const void *a, const void *b) qdict_get_str(evb->data, "node-name")); } + if (eva->event == QAPI_EVENT_MEMORY_DEVICE_SIZE_CHANGE) { + return !strcmp(qdict_get_str(eva->data, "qom-path"), + qdict_get_str(evb->data, "qom-path")); + } + return TRUE; } diff --git a/monitor/qmp-cmds.c b/monitor/qmp-cmds.c index 5c0d5e116b..343353e27a 100644 --- a/monitor/qmp-cmds.c +++ b/monitor/qmp-cmds.c @@ -36,9 +36,13 @@ #include "qapi/qapi-commands-machine.h" #include "qapi/qapi-commands-misc.h" #include "qapi/qapi-commands-ui.h" +#include "qapi/type-helpers.h" #include "qapi/qmp/qerror.h" +#include "exec/ramlist.h" #include "hw/mem/memory-device.h" #include "hw/acpi/acpi_dev_interface.h" +#include "hw/intc/intc.h" +#include "hw/rdma/rdma.h" NameInfo *qmp_query_name(Error **errp) { @@ -350,3 +354,115 @@ void qmp_display_reload(DisplayReloadOptions *arg, Error **errp) abort(); } } + +#ifdef CONFIG_PROFILER + +int64_t dev_time; + +HumanReadableText *qmp_x_query_profile(Error **errp) +{ + g_autoptr(GString) buf = g_string_new(""); + static int64_t last_cpu_exec_time; + int64_t cpu_exec_time; + int64_t delta; + + cpu_exec_time = tcg_cpu_exec_time(); + delta = cpu_exec_time - last_cpu_exec_time; + + g_string_append_printf(buf, "async time %" PRId64 " (%0.3f)\n", + dev_time, dev_time / (double)NANOSECONDS_PER_SECOND); + g_string_append_printf(buf, "qemu time %" PRId64 " (%0.3f)\n", + delta, delta / (double)NANOSECONDS_PER_SECOND); + last_cpu_exec_time = cpu_exec_time; + dev_time = 0; + + return human_readable_text_from_str(buf); +} +#else +HumanReadableText *qmp_x_query_profile(Error **errp) +{ + error_setg(errp, "Internal profiler not compiled"); + return NULL; +} +#endif + +static int qmp_x_query_rdma_foreach(Object *obj, void *opaque) +{ + RdmaProvider *rdma; + RdmaProviderClass *k; + GString *buf = opaque; + + if (object_dynamic_cast(obj, INTERFACE_RDMA_PROVIDER)) { + rdma = RDMA_PROVIDER(obj); + k 
= RDMA_PROVIDER_GET_CLASS(obj); + if (k->format_statistics) { + k->format_statistics(rdma, buf); + } else { + g_string_append_printf(buf, + "RDMA statistics not available for %s.\n", + object_get_typename(obj)); + } + } + + return 0; +} + +HumanReadableText *qmp_x_query_rdma(Error **errp) +{ + g_autoptr(GString) buf = g_string_new(""); + + object_child_foreach_recursive(object_get_root(), + qmp_x_query_rdma_foreach, buf); + + return human_readable_text_from_str(buf); +} + +HumanReadableText *qmp_x_query_ramblock(Error **errp) +{ + g_autoptr(GString) buf = ram_block_format(); + + return human_readable_text_from_str(buf); +} + +static int qmp_x_query_irq_foreach(Object *obj, void *opaque) +{ + InterruptStatsProvider *intc; + InterruptStatsProviderClass *k; + GString *buf = opaque; + + if (object_dynamic_cast(obj, TYPE_INTERRUPT_STATS_PROVIDER)) { + intc = INTERRUPT_STATS_PROVIDER(obj); + k = INTERRUPT_STATS_PROVIDER_GET_CLASS(obj); + uint64_t *irq_counts; + unsigned int nb_irqs, i; + if (k->get_statistics && + k->get_statistics(intc, &irq_counts, &nb_irqs)) { + if (nb_irqs > 0) { + g_string_append_printf(buf, "IRQ statistics for %s:\n", + object_get_typename(obj)); + for (i = 0; i < nb_irqs; i++) { + if (irq_counts[i] > 0) { + g_string_append_printf(buf, "%2d: %" PRId64 "\n", i, + irq_counts[i]); + } + } + } + } else { + g_string_append_printf(buf, + "IRQ statistics not available for %s.\n", + object_get_typename(obj)); + } + } + + return 0; +} + +HumanReadableText *qmp_x_query_irq(Error **errp) +{ + g_autoptr(GString) buf = g_string_new(""); + + object_child_foreach_recursive(object_get_root(), + qmp_x_query_irq_foreach, buf); + + return human_readable_text_from_str(buf); +} diff --git a/nbd/client-connection.c b/nbd/client-connection.c index 7123b1e189..695f855754 100644 --- a/nbd/client-connection.c +++ b/nbd/client-connection.c @@ -318,6 +318,7 @@ nbd_co_establish_connection(NBDClientConnection *conn, NBDExportInfo *info, } if (!blocking) { + error_setg(errp, "No connection at the moment"); return NULL; } diff --git a/nbd/client.c b/nbd/client.c index 0c2db4bcba..30d5383cb1 100644 --- a/nbd/client.c +++ b/nbd/client.c @@ -1434,9 +1434,7 @@ nbd_read_eof(BlockDriverState *bs, QIOChannel *ioc, void *buffer, size_t size, len = qio_channel_readv(ioc, &iov, 1, errp); if (len == QIO_CHANNEL_ERR_BLOCK) { - bdrv_dec_in_flight(bs); qio_channel_yield(ioc, G_IO_IN); - bdrv_inc_in_flight(bs); continue; } else if (len < 0) { return -EIO; diff --git a/nbd/server.c b/nbd/server.c index 3927f7789d..6d03e8a4b4 100644 --- a/nbd/server.c +++ b/nbd/server.c @@ -980,7 +980,7 @@ static int nbd_negotiate_meta_queries(NBDClient *client, size_t i; size_t count = 0; - if (!client->structured_reply) { + if (client->opt == NBD_OPT_SET_META_CONTEXT && !client->structured_reply) { return nbd_opt_invalid(client, errp, "request option '%s' when structured reply " "is not negotiated", diff --git a/net/colo-compare.c b/net/colo-compare.c index b100e7b51f..b8876d7fd9 100644 --- a/net/colo-compare.c +++ b/net/colo-compare.c @@ -170,7 +170,7 @@ static bool packet_matches_str(const char *str, return false; } - return !memcmp(str, buf, strlen(str)); + return !memcmp(str, buf, packet_len); } static void notify_remote_frame(CompareState *s) @@ -264,7 +264,7 @@ static int packet_enqueue(CompareState *s, int mode, Connection **con) pkt = NULL; return -1; } - fill_connection_key(pkt, &key); + fill_connection_key(pkt, &key, false); conn = connection_get(s->connection_track_table, &key, diff --git a/net/colo.c b/net/colo.c index 
3a3e6e89a0..1f8162f59f 100644 --- a/net/colo.c +++ b/net/colo.c @@ -83,19 +83,26 @@ int parse_packet_early(Packet *pkt) return 0; } -void extract_ip_and_port(uint32_t tmp_ports, ConnectionKey *key, Packet *pkt) +void extract_ip_and_port(uint32_t tmp_ports, ConnectionKey *key, + Packet *pkt, bool reverse) { + if (reverse) { + key->src = pkt->ip->ip_dst; + key->dst = pkt->ip->ip_src; + key->src_port = ntohs(tmp_ports & 0xffff); + key->dst_port = ntohs(tmp_ports >> 16); + } else { key->src = pkt->ip->ip_src; key->dst = pkt->ip->ip_dst; key->src_port = ntohs(tmp_ports >> 16); key->dst_port = ntohs(tmp_ports & 0xffff); + } } -void fill_connection_key(Packet *pkt, ConnectionKey *key) +void fill_connection_key(Packet *pkt, ConnectionKey *key, bool reverse) { - uint32_t tmp_ports; + uint32_t tmp_ports = 0; - memset(key, 0, sizeof(*key)); key->ip_proto = pkt->ip->ip_p; switch (key->ip_proto) { @@ -106,29 +113,15 @@ void fill_connection_key(Packet *pkt, ConnectionKey *key) case IPPROTO_SCTP: case IPPROTO_UDPLITE: tmp_ports = *(uint32_t *)(pkt->transport_header); - extract_ip_and_port(tmp_ports, key, pkt); break; case IPPROTO_AH: tmp_ports = *(uint32_t *)(pkt->transport_header + 4); - extract_ip_and_port(tmp_ports, key, pkt); break; default: break; } -} -void reverse_connection_key(ConnectionKey *key) -{ - struct in_addr tmp_ip; - uint16_t tmp_port; - - tmp_ip = key->src; - key->src = key->dst; - key->dst = tmp_ip; - - tmp_port = key->src_port; - key->src_port = key->dst_port; - key->dst_port = tmp_port; + extract_ip_and_port(tmp_ports, key, pkt, reverse); } Connection *connection_new(ConnectionKey *key) diff --git a/net/colo.h b/net/colo.h index d91cd245c4..8b3e8d5a83 100644 --- a/net/colo.h +++ b/net/colo.h @@ -89,9 +89,9 @@ typedef struct Connection { uint32_t connection_key_hash(const void *opaque); int connection_key_equal(const void *opaque1, const void *opaque2); int parse_packet_early(Packet *pkt); -void extract_ip_and_port(uint32_t tmp_ports, ConnectionKey *key, Packet *pkt); -void fill_connection_key(Packet *pkt, ConnectionKey *key); -void reverse_connection_key(ConnectionKey *key); +void extract_ip_and_port(uint32_t tmp_ports, ConnectionKey *key, + Packet *pkt, bool reverse); +void fill_connection_key(Packet *pkt, ConnectionKey *key, bool reverse); Connection *connection_new(ConnectionKey *key); void connection_destroy(void *opaque); Connection *connection_get(GHashTable *connection_track_table, diff --git a/net/filter-rewriter.c b/net/filter-rewriter.c index cb3a96cde1..bf05023dc3 100644 --- a/net/filter-rewriter.c +++ b/net/filter-rewriter.c @@ -279,15 +279,7 @@ static ssize_t colo_rewriter_receive_iov(NetFilterState *nf, */ if (pkt && is_tcp_packet(pkt)) { - fill_connection_key(pkt, &key); - - if (sender == nf->netdev) { - /* - * We need make tcp TX and RX packet - * into one connection. 
- */ - reverse_connection_key(&key); - } + fill_connection_key(pkt, &key, sender == nf->netdev); /* After failover we needn't change new TCP packet */ if (s->failover_mode && diff --git a/net/meson.build b/net/meson.build index 1076b0a7ab..847bc2ac85 100644 --- a/net/meson.build +++ b/net/meson.build @@ -18,10 +18,14 @@ softmmu_ss.add(files( softmmu_ss.add(when: 'CONFIG_TCG', if_true: files('filter-replay.c')) -softmmu_ss.add(when: 'CONFIG_L2TPV3', if_true: files('l2tpv3.c')) +if have_l2tpv3 + softmmu_ss.add(files('l2tpv3.c')) +endif softmmu_ss.add(when: slirp, if_true: files('slirp.c')) -softmmu_ss.add(when: ['CONFIG_VDE', vde], if_true: files('vde.c')) -softmmu_ss.add(when: 'CONFIG_NETMAP', if_true: files('netmap.c')) +softmmu_ss.add(when: vde, if_true: files('vde.c')) +if have_netmap + softmmu_ss.add(files('netmap.c')) +endif vhost_user_ss = ss.source_set() vhost_user_ss.add(when: 'CONFIG_VIRTIO_NET', if_true: files('vhost-user.c'), if_false: files('vhost-user-stub.c')) softmmu_ss.add_all(when: 'CONFIG_VHOST_NET_USER', if_true: vhost_user_ss) diff --git a/net/net.c b/net/net.c index 52c99196c6..f0d14dbfc1 100644 --- a/net/net.c +++ b/net/net.c @@ -239,7 +239,8 @@ static void qemu_net_client_setup(NetClientState *nc, NetClientState *peer, const char *model, const char *name, - NetClientDestructor *destructor) + NetClientDestructor *destructor, + bool is_datapath) { nc->info = info; nc->model = g_strdup(model); @@ -258,6 +259,7 @@ static void qemu_net_client_setup(NetClientState *nc, nc->incoming_queue = qemu_new_net_queue(qemu_deliver_packet_iov, nc); nc->destructor = destructor; + nc->is_datapath = is_datapath; QTAILQ_INIT(&nc->filters); } @@ -272,7 +274,23 @@ NetClientState *qemu_new_net_client(NetClientInfo *info, nc = g_malloc0(info->size); qemu_net_client_setup(nc, info, peer, model, name, - qemu_net_client_destructor); + qemu_net_client_destructor, true); + + return nc; +} + +NetClientState *qemu_new_net_control_client(NetClientInfo *info, + NetClientState *peer, + const char *model, + const char *name) +{ + NetClientState *nc; + + assert(info->size >= sizeof(NetClientState)); + + nc = g_malloc0(info->size); + qemu_net_client_setup(nc, info, peer, model, name, + qemu_net_client_destructor, false); return nc; } @@ -297,7 +315,7 @@ NICState *qemu_new_nic(NetClientInfo *info, for (i = 0; i < queues; i++) { qemu_net_client_setup(&nic->ncs[i], info, peers[i], model, name, - NULL); + NULL, true); nic->ncs[i].queue_index = i; } diff --git a/net/vhost-user.c b/net/vhost-user.c index 4a939124d2..b1a0247b59 100644 --- a/net/vhost-user.c +++ b/net/vhost-user.c @@ -198,6 +198,19 @@ static bool vhost_user_has_ufo(NetClientState *nc) return true; } +static bool vhost_user_check_peer_type(NetClientState *nc, ObjectClass *oc, + Error **errp) +{ + const char *driver = object_class_get_name(oc); + + if (!g_str_has_prefix(driver, "virtio-net-")) { + error_setg(errp, "vhost-user requires frontend driver virtio-net-*"); + return false; + } + + return true; +} + static NetClientInfo net_vhost_user_info = { .type = NET_CLIENT_DRIVER_VHOST_USER, .size = sizeof(NetVhostUserState), @@ -207,6 +220,7 @@ static NetClientInfo net_vhost_user_info = { .has_ufo = vhost_user_has_ufo, .set_vnet_be = vhost_user_set_vnet_endianness, .set_vnet_le = vhost_user_set_vnet_endianness, + .check_peer_type = vhost_user_check_peer_type, }; static gboolean net_vhost_user_watch(void *do_not_use, GIOCondition cond, @@ -397,27 +411,6 @@ static Chardev *net_vhost_claim_chardev( return chr; } -static int net_vhost_check_net(void 
*opaque, QemuOpts *opts, Error **errp) -{ - const char *name = opaque; - const char *driver, *netdev; - - driver = qemu_opt_get(opts, "driver"); - netdev = qemu_opt_get(opts, "netdev"); - - if (!driver || !netdev) { - return 0; - } - - if (strcmp(netdev, name) == 0 && - !g_str_has_prefix(driver, "virtio-net-")) { - error_setg(errp, "vhost-user requires frontend driver virtio-net-*"); - return -1; - } - - return 0; -} - int net_init_vhost_user(const Netdev *netdev, const char *name, NetClientState *peer, Error **errp) { @@ -433,12 +426,6 @@ int net_init_vhost_user(const Netdev *netdev, const char *name, return -1; } - /* verify net frontend */ - if (qemu_opts_foreach(qemu_find_opts("device"), net_vhost_check_net, - (char *)name, errp)) { - return -1; - } - queues = vhost_user_opts->has_queues ? vhost_user_opts->queues : 1; if (queues < 1 || queues > MAX_QUEUE_NUM) { error_setg(errp, diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c index 912686457c..49ab322511 100644 --- a/net/vhost-vdpa.c +++ b/net/vhost-vdpa.c @@ -18,6 +18,7 @@ #include "qemu/error-report.h" #include "qemu/option.h" #include "qapi/error.h" +#include #include #include #include "standard-headers/linux/virtio_net.h" @@ -51,6 +52,14 @@ const int vdpa_feature_bits[] = { VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_MTU, + VIRTIO_NET_F_CTRL_RX, + VIRTIO_NET_F_CTRL_RX_EXTRA, + VIRTIO_NET_F_CTRL_VLAN, + VIRTIO_NET_F_GUEST_ANNOUNCE, + VIRTIO_NET_F_CTRL_MAC_ADDR, + VIRTIO_NET_F_RSS, + VIRTIO_NET_F_MQ, + VIRTIO_NET_F_CTRL_VQ, VIRTIO_F_IOMMU_PLATFORM, VIRTIO_F_RING_PACKED, VIRTIO_NET_F_RSS, @@ -81,7 +90,8 @@ static int vhost_vdpa_net_check_device_id(struct vhost_net *net) return ret; } -static int vhost_vdpa_add(NetClientState *ncs, void *be) +static int vhost_vdpa_add(NetClientState *ncs, void *be, + int queue_pair_index, int nvqs) { VhostNetOptions options; struct vhost_net *net = NULL; @@ -94,7 +104,7 @@ static int vhost_vdpa_add(NetClientState *ncs, void *be) options.net_backend = ncs; options.opaque = be; options.busyloop_timeout = 0; - options.nvqs = 2; + options.nvqs = nvqs; net = vhost_net_init(&options); if (!net) { @@ -147,67 +157,147 @@ static bool vhost_vdpa_has_ufo(NetClientState *nc) } +static bool vhost_vdpa_check_peer_type(NetClientState *nc, ObjectClass *oc, + Error **errp) +{ + const char *driver = object_class_get_name(oc); + + if (!g_str_has_prefix(driver, "virtio-net-")) { + error_setg(errp, "vhost-vdpa requires frontend driver virtio-net-*"); + return false; + } + + return true; +} + static NetClientInfo net_vhost_vdpa_info = { .type = NET_CLIENT_DRIVER_VHOST_VDPA, .size = sizeof(VhostVDPAState), .cleanup = vhost_vdpa_cleanup, .has_vnet_hdr = vhost_vdpa_has_vnet_hdr, .has_ufo = vhost_vdpa_has_ufo, + .check_peer_type = vhost_vdpa_check_peer_type, }; -static int net_vhost_vdpa_init(NetClientState *peer, const char *device, - const char *name, const char *vhostdev) +static NetClientState *net_vhost_vdpa_init(NetClientState *peer, + const char *device, + const char *name, + int vdpa_device_fd, + int queue_pair_index, + int nvqs, + bool is_datapath) { NetClientState *nc = NULL; VhostVDPAState *s; - int vdpa_device_fd = -1; int ret = 0; assert(name); - nc = qemu_new_net_client(&net_vhost_vdpa_info, peer, device, name); + if (is_datapath) { + nc = qemu_new_net_client(&net_vhost_vdpa_info, peer, device, + name); + } else { + nc = qemu_new_net_control_client(&net_vhost_vdpa_info, peer, + device, name); + } snprintf(nc->info_str, sizeof(nc->info_str), TYPE_VHOST_VDPA); s = DO_UPCAST(VhostVDPAState, nc, nc); - 
vdpa_device_fd = qemu_open_old(vhostdev, O_RDWR); - if (vdpa_device_fd == -1) { - return -errno; - } + s->vhost_vdpa.device_fd = vdpa_device_fd; - ret = vhost_vdpa_add(nc, (void *)&s->vhost_vdpa); + s->vhost_vdpa.index = queue_pair_index; + ret = vhost_vdpa_add(nc, (void *)&s->vhost_vdpa, queue_pair_index, nvqs); if (ret) { - qemu_close(vdpa_device_fd); qemu_del_net_client(nc); + return NULL; } - return ret; + return nc; } -static int net_vhost_check_net(void *opaque, QemuOpts *opts, Error **errp) +static int vhost_vdpa_get_max_queue_pairs(int fd, int *has_cvq, Error **errp) { - const char *name = opaque; - const char *driver, *netdev; + unsigned long config_size = offsetof(struct vhost_vdpa_config, buf); + struct vhost_vdpa_config *config; + __virtio16 *max_queue_pairs; + uint64_t features; + int ret; - driver = qemu_opt_get(opts, "driver"); - netdev = qemu_opt_get(opts, "netdev"); - if (!driver || !netdev) { - return 0; + ret = ioctl(fd, VHOST_GET_FEATURES, &features); + if (ret) { + error_setg(errp, "Fail to query features from vhost-vDPA device"); + return ret; } - if (strcmp(netdev, name) == 0 && - !g_str_has_prefix(driver, "virtio-net-")) { - error_setg(errp, "vhost-vdpa requires frontend driver virtio-net-*"); - return -1; + + if (features & (1 << VIRTIO_NET_F_CTRL_VQ)) { + *has_cvq = 1; + } else { + *has_cvq = 0; } - return 0; + + if (features & (1 << VIRTIO_NET_F_MQ)) { + config = g_malloc0(config_size + sizeof(*max_queue_pairs)); + config->off = offsetof(struct virtio_net_config, max_virtqueue_pairs); + config->len = sizeof(*max_queue_pairs); + + ret = ioctl(fd, VHOST_VDPA_GET_CONFIG, config); + if (ret) { + error_setg(errp, "Fail to get config from vhost-vDPA device"); + return -ret; + } + + max_queue_pairs = (__virtio16 *)&config->buf; + + return lduw_le_p(max_queue_pairs); + } + + return 1; } int net_init_vhost_vdpa(const Netdev *netdev, const char *name, NetClientState *peer, Error **errp) { const NetdevVhostVDPAOptions *opts; + int vdpa_device_fd; + NetClientState **ncs, *nc; + int queue_pairs, i, has_cvq = 0; assert(netdev->type == NET_CLIENT_DRIVER_VHOST_VDPA); opts = &netdev->u.vhost_vdpa; - /* verify net frontend */ - if (qemu_opts_foreach(qemu_find_opts("device"), net_vhost_check_net, - (char *)name, errp)) { - return -1; + + vdpa_device_fd = qemu_open_old(opts->vhostdev, O_RDWR); + if (vdpa_device_fd == -1) { + return -errno; } - return net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name, opts->vhostdev); + + queue_pairs = vhost_vdpa_get_max_queue_pairs(vdpa_device_fd, + &has_cvq, errp); + if (queue_pairs < 0) { + qemu_close(vdpa_device_fd); + return queue_pairs; + } + + ncs = g_malloc0(sizeof(*ncs) * queue_pairs); + + for (i = 0; i < queue_pairs; i++) { + ncs[i] = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name, + vdpa_device_fd, i, 2, true); + if (!ncs[i]) + goto err; + } + + if (has_cvq) { + nc = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name, + vdpa_device_fd, i, 1, false); + if (!nc) + goto err; + } + + g_free(ncs); + return 0; + +err: + if (i) { + qemu_del_net_client(ncs[0]); + } + qemu_close(vdpa_device_fd); + g_free(ncs); + + return -1; } diff --git a/pc-bios/bios-256k.bin b/pc-bios/bios-256k.bin index 96bdeaa487..dea01da468 100644 Binary files a/pc-bios/bios-256k.bin and b/pc-bios/bios-256k.bin differ diff --git a/pc-bios/bios-microvm.bin b/pc-bios/bios-microvm.bin index eefd32197e..ca6375d65c 100644 Binary files a/pc-bios/bios-microvm.bin and b/pc-bios/bios-microvm.bin differ diff --git a/pc-bios/bios.bin b/pc-bios/bios.bin index 3ecaf4a709..e9fa99f006 100644 
Binary files a/pc-bios/bios.bin and b/pc-bios/bios.bin differ diff --git a/pc-bios/descriptors/meson.build b/pc-bios/descriptors/meson.build index 29efa16d99..66f85d01c4 100644 --- a/pc-bios/descriptors/meson.build +++ b/pc-bios/descriptors/meson.build @@ -1,4 +1,4 @@ -if install_edk2_blobs +if unpack_edk2_blobs and get_option('install_blobs') foreach f: [ '50-edk2-i386-secure.json', '50-edk2-x86_64-secure.json', @@ -10,7 +10,7 @@ if install_edk2_blobs configure_file(input: files(f), output: f, configuration: {'DATADIR': get_option('prefix') / qemu_datadir}, - install: get_option('install_blobs'), + install: true, install_dir: qemu_datadir / 'firmware') endforeach endif diff --git a/pc-bios/hppa-firmware.img b/pc-bios/hppa-firmware.img index 4ba8c7f8b8..a34a8c84f5 100644 Binary files a/pc-bios/hppa-firmware.img and b/pc-bios/hppa-firmware.img differ diff --git a/pc-bios/meson.build b/pc-bios/meson.build index f2b32598af..b40ff3f2bd 100644 --- a/pc-bios/meson.build +++ b/pc-bios/meson.build @@ -1,4 +1,5 @@ -if install_edk2_blobs +roms = [] +if unpack_edk2_blobs fds = [ 'edk2-aarch64-code.fd', 'edk2-arm-code.fd', @@ -11,7 +12,7 @@ if install_edk2_blobs ] foreach f : fds - custom_target(f, + roms += custom_target(f, build_by_default: have_system, output: f, input: '@0@.bz2'.format(f), @@ -62,6 +63,7 @@ blobs = files( 'petalogix-s3adsp1800.dtb', 'petalogix-ml605.dtb', 'multiboot.bin', + 'multiboot_dma.bin', 'linuxboot.bin', 'linuxboot_dma.bin', 'kvmvapic.bin', diff --git a/pc-bios/multiboot_dma.bin b/pc-bios/multiboot_dma.bin new file mode 100644 index 0000000000..c0e2c3102a Binary files /dev/null and b/pc-bios/multiboot_dma.bin differ diff --git a/pc-bios/openbios-ppc b/pc-bios/openbios-ppc index 91a73db9a3..67f32a8602 100644 Binary files a/pc-bios/openbios-ppc and b/pc-bios/openbios-ppc differ diff --git a/pc-bios/openbios-sparc32 b/pc-bios/openbios-sparc32 index a5b7389191..376b01c10b 100644 Binary files a/pc-bios/openbios-sparc32 and b/pc-bios/openbios-sparc32 differ diff --git a/pc-bios/openbios-sparc64 b/pc-bios/openbios-sparc64 index f7a501efc6..bbd746fde9 100644 Binary files a/pc-bios/openbios-sparc64 and b/pc-bios/openbios-sparc64 differ diff --git a/pc-bios/optionrom/Makefile b/pc-bios/optionrom/Makefile index 30771f8d17..5d55d25acc 100644 --- a/pc-bios/optionrom/Makefile +++ b/pc-bios/optionrom/Makefile @@ -1,9 +1,8 @@ -CURRENT_MAKEFILE := $(realpath $(word $(words $(MAKEFILE_LIST)),$(MAKEFILE_LIST))) -SRC_DIR := $(dir $(CURRENT_MAKEFILE)) -TOPSRC_DIR := $(SRC_DIR)/../.. 
+include config.mak +SRC_DIR := $(TOPSRC_DIR)/pc-bios/optionrom VPATH = $(SRC_DIR) -all: multiboot.bin linuxboot.bin linuxboot_dma.bin kvmvapic.bin pvh.bin +all: multiboot.bin multiboot_dma.bin linuxboot.bin linuxboot_dma.bin kvmvapic.bin pvh.bin # Dummy command so that make thinks it has done something @true @@ -42,8 +41,6 @@ override CFLAGS += $(call cc-option, $(Wa)-32) LD_I386_EMULATION ?= elf_i386 override LDFLAGS = -m $(LD_I386_EMULATION) -T $(SRC_DIR)/flat.lds -all: multiboot.bin linuxboot.bin linuxboot_dma.bin kvmvapic.bin pvh.bin - pvh.img: pvh.o pvh_main.o %.o: %.S diff --git a/pc-bios/optionrom/multiboot.S b/pc-bios/optionrom/multiboot.S index b7efe4de34..181a4b03a3 100644 --- a/pc-bios/optionrom/multiboot.S +++ b/pc-bios/optionrom/multiboot.S @@ -68,7 +68,7 @@ run_multiboot: mov %eax, %es /* Read the bootinfo struct into RAM */ - read_fw_blob(FW_CFG_INITRD) + read_fw_blob_dma(FW_CFG_INITRD) /* FS = bootinfo_struct */ read_fw FW_CFG_INITRD_ADDR @@ -188,7 +188,7 @@ prot_mode: movl %eax, %gs /* Read the kernel and modules into RAM */ - read_fw_blob(FW_CFG_KERNEL) + read_fw_blob_dma(FW_CFG_KERNEL) /* Jump off to the kernel */ read_fw FW_CFG_KERNEL_ENTRY diff --git a/pc-bios/optionrom/multiboot_dma.S b/pc-bios/optionrom/multiboot_dma.S new file mode 100644 index 0000000000..d809af3e23 --- /dev/null +++ b/pc-bios/optionrom/multiboot_dma.S @@ -0,0 +1,2 @@ +#define USE_FW_CFG_DMA 1 +#include "multiboot.S" diff --git a/pc-bios/optionrom/optionrom.h b/pc-bios/optionrom/optionrom.h index a2b612f1a7..8d74c0ddf3 100644 --- a/pc-bios/optionrom/optionrom.h +++ b/pc-bios/optionrom/optionrom.h @@ -37,6 +37,17 @@ #define BIOS_CFG_IOPORT_CFG 0x510 #define BIOS_CFG_IOPORT_DATA 0x511 +#define FW_CFG_DMA_CTL_ERROR 0x01 +#define FW_CFG_DMA_CTL_READ 0x02 +#define FW_CFG_DMA_CTL_SKIP 0x04 +#define FW_CFG_DMA_CTL_SELECT 0x08 +#define FW_CFG_DMA_CTL_WRITE 0x10 + +#define FW_CFG_DMA_SIGNATURE 0x51454d5520434647ULL /* "QEMU CFG" */ + +#define BIOS_CFG_DMA_ADDR_HIGH 0x514 +#define BIOS_CFG_DMA_ADDR_LOW 0x518 + /* Break the translation block flow so -d cpu shows us values */ #define DEBUG_HERE \ jmp 1f; \ @@ -62,6 +73,61 @@ bswap %eax .endm + +/* + * Read data from the fw_cfg device using DMA. + * Clobbers: %edx, %eax, ADDR, SIZE, memory[%esp-16] to memory[%esp] + */ +.macro read_fw_dma VAR, SIZE, ADDR + /* Address */ + bswapl \ADDR + pushl \ADDR + + /* We only support 32 bit target addresses */ + xorl %eax, %eax + pushl %eax + mov $BIOS_CFG_DMA_ADDR_HIGH, %dx + outl %eax, (%dx) + + /* Size */ + bswapl \SIZE + pushl \SIZE + + /* Control */ + movl $(\VAR << 16) | (FW_CFG_DMA_CTL_READ | FW_CFG_DMA_CTL_SELECT), %eax + bswapl %eax + pushl %eax + + movl %esp, %eax /* Address of the struct we generated */ + bswapl %eax + mov $BIOS_CFG_DMA_ADDR_LOW, %dx + outl %eax, (%dx) /* Initiate DMA */ + +1: mov (%esp), %eax /* Wait for completion */ + bswapl %eax + testl $~FW_CFG_DMA_CTL_ERROR, %eax + jnz 1b + addl $16, %esp +.endm + + +/* + * Read a blob from the fw_cfg device using DMA + * Requires _ADDR, _SIZE and _DATA values for the parameter. 
+ * + * Clobbers: %eax, %edx, %es, %ecx, %edi and adresses %esp-20 to %esp + */ +#ifdef USE_FW_CFG_DMA +#define read_fw_blob_dma(var) \ + read_fw var ## _SIZE; \ + mov %eax, %ecx; \ + read_fw var ## _ADDR; \ + mov %eax, %edi ;\ + read_fw_dma var ## _DATA, %ecx, %edi +#else +#define read_fw_blob_dma(var) read_fw_blob(var) +#endif + #define read_fw_blob_pre(var) \ read_fw var ## _SIZE; \ mov %eax, %ecx; \ diff --git a/pc-bios/vgabios-ati.bin b/pc-bios/vgabios-ati.bin index 118caacde6..0de9c0d3c1 100644 Binary files a/pc-bios/vgabios-ati.bin and b/pc-bios/vgabios-ati.bin differ diff --git a/pc-bios/vgabios-bochs-display.bin b/pc-bios/vgabios-bochs-display.bin index d79124b760..76e076f029 100644 Binary files a/pc-bios/vgabios-bochs-display.bin and b/pc-bios/vgabios-bochs-display.bin differ diff --git a/pc-bios/vgabios-cirrus.bin b/pc-bios/vgabios-cirrus.bin index 37bf5e7fe7..0f16ad199d 100644 Binary files a/pc-bios/vgabios-cirrus.bin and b/pc-bios/vgabios-cirrus.bin differ diff --git a/pc-bios/vgabios-qxl.bin b/pc-bios/vgabios-qxl.bin index 0ff8ff21f0..420f9ed331 100644 Binary files a/pc-bios/vgabios-qxl.bin and b/pc-bios/vgabios-qxl.bin differ diff --git a/pc-bios/vgabios-ramfb.bin b/pc-bios/vgabios-ramfb.bin index df5e9d615a..b9b2e230a5 100644 Binary files a/pc-bios/vgabios-ramfb.bin and b/pc-bios/vgabios-ramfb.bin differ diff --git a/pc-bios/vgabios-stdvga.bin b/pc-bios/vgabios-stdvga.bin index 70f6094153..b59256572d 100644 Binary files a/pc-bios/vgabios-stdvga.bin and b/pc-bios/vgabios-stdvga.bin differ diff --git a/pc-bios/vgabios-virtio.bin b/pc-bios/vgabios-virtio.bin index 65af5be59d..c00414ab83 100644 Binary files a/pc-bios/vgabios-virtio.bin and b/pc-bios/vgabios-virtio.bin differ diff --git a/pc-bios/vgabios-vmware.bin b/pc-bios/vgabios-vmware.bin index 89d42b32c1..5454aceda8 100644 Binary files a/pc-bios/vgabios-vmware.bin and b/pc-bios/vgabios-vmware.bin differ diff --git a/pc-bios/vgabios.bin b/pc-bios/vgabios.bin index 320471e18d..ae82887d73 100644 Binary files a/pc-bios/vgabios.bin and b/pc-bios/vgabios.bin differ diff --git a/plugins/api.c b/plugins/api.c index acff9ce8ac..b143b09ce9 100644 --- a/plugins/api.c +++ b/plugins/api.c @@ -45,7 +45,6 @@ #include "qemu/plugin-memory.h" #include "hw/boards.h" #endif -#include "trace/mem.h" /* Uninstall and Reset handlers */ @@ -246,22 +245,25 @@ const char *qemu_plugin_insn_symbol(const struct qemu_plugin_insn *insn) unsigned qemu_plugin_mem_size_shift(qemu_plugin_meminfo_t info) { - return info & TRACE_MEM_SZ_SHIFT_MASK; + MemOp op = get_memop(info); + return op & MO_SIZE; } bool qemu_plugin_mem_is_sign_extended(qemu_plugin_meminfo_t info) { - return !!(info & TRACE_MEM_SE); + MemOp op = get_memop(info); + return op & MO_SIGN; } bool qemu_plugin_mem_is_big_endian(qemu_plugin_meminfo_t info) { - return !!(info & TRACE_MEM_BE); + MemOp op = get_memop(info); + return (op & MO_BSWAP) == MO_BE; } bool qemu_plugin_mem_is_store(qemu_plugin_meminfo_t info) { - return !!(info & TRACE_MEM_ST); + return get_plugin_meminfo_rw(info) & QEMU_PLUGIN_MEM_W; } /* @@ -277,11 +279,12 @@ struct qemu_plugin_hwaddr *qemu_plugin_get_hwaddr(qemu_plugin_meminfo_t info, { #ifdef CONFIG_SOFTMMU CPUState *cpu = current_cpu; - unsigned int mmu_idx = info >> TRACE_MEM_MMU_SHIFT; - hwaddr_info.is_store = info & TRACE_MEM_ST; + unsigned int mmu_idx = get_mmuidx(info); + enum qemu_plugin_mem_rw rw = get_plugin_meminfo_rw(info); + hwaddr_info.is_store = (rw & QEMU_PLUGIN_MEM_W) != 0; if (!tlb_plugin_lookup(cpu, vaddr, mmu_idx, - info & TRACE_MEM_ST, &hwaddr_info)) { + 
hwaddr_info.is_store, &hwaddr_info)) { error_report("invalid use of qemu_plugin_get_hwaddr"); return NULL; } diff --git a/plugins/core.c b/plugins/core.c index 6b2490f973..792262da08 100644 --- a/plugins/core.c +++ b/plugins/core.c @@ -27,7 +27,6 @@ #include "exec/helper-proto.h" #include "tcg/tcg.h" #include "tcg/tcg-op.h" -#include "trace/mem.h" /* mem_info macros */ #include "plugin.h" #include "qemu/compiler.h" @@ -446,7 +445,8 @@ void exec_inline_op(struct qemu_plugin_dyn_cb *cb) } } -void qemu_plugin_vcpu_mem_cb(CPUState *cpu, uint64_t vaddr, uint32_t info) +void qemu_plugin_vcpu_mem_cb(CPUState *cpu, uint64_t vaddr, + MemOpIdx oi, enum qemu_plugin_mem_rw rw) { GArray *arr = cpu->plugin_mem_cbs; size_t i; @@ -457,14 +457,14 @@ void qemu_plugin_vcpu_mem_cb(CPUState *cpu, uint64_t vaddr, uint32_t info) for (i = 0; i < arr->len; i++) { struct qemu_plugin_dyn_cb *cb = &g_array_index(arr, struct qemu_plugin_dyn_cb, i); - int w = !!(info & TRACE_MEM_ST) + 1; - if (!(w & cb->rw)) { + if (!(rw & cb->rw)) { break; } switch (cb->type) { case PLUGIN_CB_REGULAR: - cb->f.vcpu_mem(cpu->cpu_index, info, vaddr, cb->userp); + cb->f.vcpu_mem(cpu->cpu_index, make_plugin_meminfo(oi, rw), + vaddr, cb->userp); break; case PLUGIN_CB_INLINE: exec_inline_op(cb); diff --git a/plugins/meson.build b/plugins/meson.build index bfd5c9822a..aeb386ebae 100644 --- a/plugins/meson.build +++ b/plugins/meson.build @@ -2,9 +2,9 @@ plugin_ldflags = [] # Modules need more symbols than just those in plugins/qemu-plugins.symbols if not enable_modules if 'CONFIG_HAS_LD_DYNAMIC_LIST' in config_host - plugin_ldflags = ['-Wl,--dynamic-list=' + (meson.build_root() / 'qemu-plugins-ld.symbols')] + plugin_ldflags = ['-Wl,--dynamic-list=' + (meson.project_build_root() / 'qemu-plugins-ld.symbols')] elif 'CONFIG_HAS_LD_EXPORTED_SYMBOLS_LIST' in config_host - plugin_ldflags = ['-Wl,-exported_symbols_list,' + (meson.build_root() / 'qemu-plugins-ld64.symbols')] + plugin_ldflags = ['-Wl,-exported_symbols_list,' + (meson.project_build_root() / 'qemu-plugins-ld64.symbols')] endif endif diff --git a/plugins/qemu-plugins.symbols b/plugins/qemu-plugins.symbols index 67b309ea2a..4834756ba3 100644 --- a/plugins/qemu-plugins.symbols +++ b/plugins/qemu-plugins.symbols @@ -1,11 +1,14 @@ { qemu_plugin_bool_parse; qemu_plugin_get_hwaddr; + qemu_plugin_hwaddr_device_name; qemu_plugin_hwaddr_is_io; + qemu_plugin_hwaddr_phys_addr; qemu_plugin_insn_data; qemu_plugin_insn_disas; qemu_plugin_insn_haddr; qemu_plugin_insn_size; + qemu_plugin_insn_symbol; qemu_plugin_insn_vaddr; qemu_plugin_mem_is_big_endian; qemu_plugin_mem_is_sign_extended; diff --git a/po/tr.po b/po/tr.po index 632c7f3851..f4f0425c43 100644 --- a/po/tr.po +++ b/po/tr.po @@ -1,14 +1,15 @@ # Turkish translation for QEMU. # This file is put in the public domain. # Ozan Çağlayan , 2013. +# Oğuz Ersen , 2021. 
# msgid "" msgstr "" "Project-Id-Version: QEMU 1.4.50\n" "Report-Msgid-Bugs-To: qemu-devel@nongnu.org\n" "POT-Creation-Date: 2018-07-18 07:56+0200\n" -"PO-Revision-Date: 2013-04-22 18:35+0300\n" -"Last-Translator: Ozan Çağlayan \n" +"PO-Revision-Date: 2021-08-15 22:17+0300\n" +"Last-Translator: Oğuz Ersen \n" "Language-Team: Türkçe <>\n" "Language: tr\n" "MIME-Version: 1.0\n" @@ -33,24 +34,22 @@ msgid "Power _Down" msgstr "_Kapat" msgid "_Quit" -msgstr "" +msgstr "_Çıkış" msgid "_Fullscreen" -msgstr "" +msgstr "_Tam Ekran" msgid "_Copy" -msgstr "" +msgstr "_Kopyala" -#, fuzzy msgid "Zoom _In" -msgstr "Yakınlaş ve Sığ_dır" +msgstr "_Yakınlaş" -#, fuzzy msgid "Zoom _Out" -msgstr "Yakınlaş ve Sığ_dır" +msgstr "_Uzaklaş" msgid "Best _Fit" -msgstr "" +msgstr "_En Uygun" msgid "Zoom To _Fit" msgstr "Yakınlaş ve Sığ_dır" @@ -62,13 +61,13 @@ msgid "_Grab Input" msgstr "Girdiyi _Yakala" msgid "Show _Tabs" -msgstr "Se_kmeleri Göster" +msgstr "_Sekmeleri Göster" msgid "Detach Tab" -msgstr "" +msgstr "Sekmeyi Ayır" msgid "Show Menubar" -msgstr "" +msgstr "Menü Çubuğunu Göster" msgid "_Machine" msgstr "_Makine" diff --git a/python/qemu/aqmp/__init__.py b/python/qemu/aqmp/__init__.py index ab1782999c..880d5b6fa7 100644 --- a/python/qemu/aqmp/__init__.py +++ b/python/qemu/aqmp/__init__.py @@ -21,7 +21,7 @@ managing QMP events. # This work is licensed under the terms of the GNU GPL, version 2. See # the COPYING file in the top-level directory. -import warnings +import logging from .error import AQMPError from .events import EventListener @@ -30,16 +30,8 @@ from .protocol import ConnectError, Runstate, StateError from .qmp_client import ExecInterruptedError, ExecuteError, QMPClient -_WMSG = """ - -The Asynchronous QMP library is currently in development and its API -should be considered highly fluid and subject to change. It should -not be used by any other scripts checked into the QEMU tree. - -Proceed with caution! -""" - -warnings.warn(_WMSG, FutureWarning) +# Suppress logging unless an application engages it. +logging.getLogger('qemu.aqmp').addHandler(logging.NullHandler()) # The order of these fields impact the Sphinx documentation order. diff --git a/python/qemu/aqmp/events.py b/python/qemu/aqmp/events.py index fb81d21610..5f7150c78d 100644 --- a/python/qemu/aqmp/events.py +++ b/python/qemu/aqmp/events.py @@ -556,7 +556,13 @@ class EventListener: """ return await self._queue.get() - def clear(self) -> None: + def empty(self) -> bool: + """ + Return `True` if there are no pending events. + """ + return self._queue.empty() + + def clear(self) -> List[Message]: """ Clear this listener of all pending events. @@ -564,17 +570,22 @@ class EventListener: pending FIFO queue synchronously. It can be also be used to manually clear any pending events, if desired. + :return: The cleared events, if any. + .. warning:: Take care when discarding events. Cleared events will be silently tossed on the floor. All events that were ever accepted by this listener are visible in `history()`. """ + events = [] while True: try: - self._queue.get_nowait() + events.append(self._queue.get_nowait()) except asyncio.QueueEmpty: break + return events + def __aiter__(self) -> AsyncIterator[Message]: return self diff --git a/python/qemu/aqmp/legacy.py b/python/qemu/aqmp/legacy.py new file mode 100644 index 0000000000..9e7b9fb80b --- /dev/null +++ b/python/qemu/aqmp/legacy.py @@ -0,0 +1,138 @@ +""" +Sync QMP Wrapper + +This class pretends to be qemu.qmp.QEMUMonitorProtocol. 
+""" + +import asyncio +from typing import ( + Awaitable, + List, + Optional, + TypeVar, + Union, +) + +import qemu.qmp +from qemu.qmp import QMPMessage, QMPReturnValue, SocketAddrT + +from .qmp_client import QMPClient + + +# pylint: disable=missing-docstring + + +class QEMUMonitorProtocol(qemu.qmp.QEMUMonitorProtocol): + def __init__(self, address: SocketAddrT, + server: bool = False, + nickname: Optional[str] = None): + + # pylint: disable=super-init-not-called + self._aqmp = QMPClient(nickname) + self._aloop = asyncio.get_event_loop() + self._address = address + self._timeout: Optional[float] = None + + _T = TypeVar('_T') + + def _sync( + self, future: Awaitable[_T], timeout: Optional[float] = None + ) -> _T: + return self._aloop.run_until_complete( + asyncio.wait_for(future, timeout=timeout) + ) + + def _get_greeting(self) -> Optional[QMPMessage]: + if self._aqmp.greeting is not None: + # pylint: disable=protected-access + return self._aqmp.greeting._asdict() + return None + + # __enter__ and __exit__ need no changes + # parse_address needs no changes + + def connect(self, negotiate: bool = True) -> Optional[QMPMessage]: + self._aqmp.await_greeting = negotiate + self._aqmp.negotiate = negotiate + + self._sync( + self._aqmp.connect(self._address) + ) + return self._get_greeting() + + def accept(self, timeout: Optional[float] = 15.0) -> QMPMessage: + self._aqmp.await_greeting = True + self._aqmp.negotiate = True + + self._sync( + self._aqmp.accept(self._address), + timeout + ) + + ret = self._get_greeting() + assert ret is not None + return ret + + def cmd_obj(self, qmp_cmd: QMPMessage) -> QMPMessage: + return dict( + self._sync( + # pylint: disable=protected-access + + # _raw() isn't a public API, because turning off + # automatic ID assignment is discouraged. For + # compatibility with iotests *only*, do it anyway. + self._aqmp._raw(qmp_cmd, assign_id=False), + self._timeout + ) + ) + + # Default impl of cmd() delegates to cmd_obj + + def command(self, cmd: str, **kwds: object) -> QMPReturnValue: + return self._sync( + self._aqmp.execute(cmd, kwds), + self._timeout + ) + + def pull_event(self, + wait: Union[bool, float] = False) -> Optional[QMPMessage]: + if not wait: + # wait is False/0: "do not wait, do not except." + if self._aqmp.events.empty(): + return None + + # If wait is 'True', wait forever. If wait is False/0, the events + # queue must not be empty; but it still needs some real amount + # of time to complete. + timeout = None + if wait and isinstance(wait, float): + timeout = wait + + return dict( + self._sync( + self._aqmp.events.get(), + timeout + ) + ) + + def get_events(self, wait: Union[bool, float] = False) -> List[QMPMessage]: + events = [dict(x) for x in self._aqmp.events.clear()] + if events: + return events + + event = self.pull_event(wait) + return [event] if event is not None else [] + + def clear_events(self) -> None: + self._aqmp.events.clear() + + def close(self) -> None: + self._sync( + self._aqmp.disconnect() + ) + + def settimeout(self, timeout: Optional[float]) -> None: + self._timeout = timeout + + def send_fd_scm(self, fd: int) -> None: + self._aqmp.send_fd_scm(fd) diff --git a/python/qemu/aqmp/models.py b/python/qemu/aqmp/models.py index 24c94123ac..de87f87804 100644 --- a/python/qemu/aqmp/models.py +++ b/python/qemu/aqmp/models.py @@ -8,8 +8,10 @@ data to make sure it conforms to spec. 
# pylint: disable=too-few-public-methods from collections import abc +import copy from typing import ( Any, + Dict, Mapping, Optional, Sequence, @@ -66,6 +68,17 @@ class Greeting(Model): self._check_member('QMP', abc.Mapping, "JSON object") self.QMP = QMPGreeting(self._raw['QMP']) + def _asdict(self) -> Dict[str, object]: + """ + For compatibility with the iotests sync QMP wrapper. + + The legacy QMP interface needs Greetings as a garden-variety Dict. + + This interface is private in the hopes that it will be able to + be dropped again in the near-future. Caller beware! + """ + return dict(copy.deepcopy(self._raw)) + class QMPGreeting(Model): """ diff --git a/python/qemu/aqmp/protocol.py b/python/qemu/aqmp/protocol.py index 32e78749c1..ae1df24026 100644 --- a/python/qemu/aqmp/protocol.py +++ b/python/qemu/aqmp/protocol.py @@ -721,8 +721,11 @@ class AsyncProtocol(Generic[T]): self.logger.debug("Task.%s: cancelled.", name) return except BaseException as err: - self.logger.error("Task.%s: %s", - name, exception_summary(err)) + self.logger.log( + logging.INFO if isinstance(err, EOFError) else logging.ERROR, + "Task.%s: %s", + name, exception_summary(err) + ) self.logger.debug("Task.%s: failure:\n%s\n", name, pretty_traceback()) self._schedule_disconnect() diff --git a/python/qemu/aqmp/qmp_client.py b/python/qemu/aqmp/qmp_client.py index 82e9dab124..f987da02eb 100644 --- a/python/qemu/aqmp/qmp_client.py +++ b/python/qemu/aqmp/qmp_client.py @@ -9,6 +9,8 @@ accept an incoming connection from that server. import asyncio import logging +import socket +import struct from typing import ( Dict, List, @@ -224,6 +226,11 @@ class QMPClient(AsyncProtocol[Message], Events): 'asyncio.Queue[QMPClient._PendingT]' ] = {} + @property + def greeting(self) -> Optional[Greeting]: + """The `Greeting` from the QMP server, if any.""" + return self._greeting + @upper_half async def _establish_session(self) -> None: """ @@ -619,3 +626,23 @@ class QMPClient(AsyncProtocol[Message], Events): """ msg = self.make_execute_msg(cmd, arguments, oob=oob) return await self.execute_msg(msg) + + @upper_half + @require(Runstate.RUNNING) + def send_fd_scm(self, fd: int) -> None: + """ + Send a file descriptor to the remote via SCM_RIGHTS. + """ + assert self._writer is not None + sock = self._writer.transport.get_extra_info('socket') + + if sock.family != socket.AF_UNIX: + raise AQMPError("Sending file descriptors requires a UNIX socket.") + + # Void the warranty sticker. + # Access to sendmsg in asyncio is scheduled for removal in Python 3.11. + sock = sock._sock # pylint: disable=protected-access + sock.sendmsg( + [b' '], + [(socket.SOL_SOCKET, socket.SCM_RIGHTS, struct.pack('@i', fd))] + ) diff --git a/python/qemu/machine/README.rst b/python/qemu/machine/README.rst index ac2b4fffb4..8de2c3d772 100644 --- a/python/qemu/machine/README.rst +++ b/python/qemu/machine/README.rst @@ -2,7 +2,7 @@ qemu.machine package ==================== This package provides core utilities used for testing and debugging -QEMU. It is used by the iotests, vm tests, acceptance tests, and several +QEMU. It is used by the iotests, vm tests, avocado tests, and several other utilities in the ./scripts directory. It is not a fully-fledged SDK and it is subject to change at any time. 
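The new qemu.aqmp.legacy.QEMUMonitorProtocol introduced above is meant as a drop-in synchronous facade over the async client. A minimal usage sketch, assuming a QMP server is already listening on a hypothetical /tmp/qmp.sock (error handling omitted):

    from qemu.aqmp.legacy import QEMUMonitorProtocol

    qmp = QEMUMonitorProtocol('/tmp/qmp.sock')   # hypothetical UNIX socket path
    qmp.connect(negotiate=True)                  # returns the greeting as a plain dict
    print(qmp.command('query-version'))          # blocks until the reply arrives
    qmp.close()

Callers that previously imported qemu.qmp.QEMUMonitorProtocol keep the same call signatures; the wrapper simply runs the underlying coroutines to completion on a private event loop.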
diff --git a/python/qemu/machine/machine.py b/python/qemu/machine/machine.py index 34131884a5..a487c39745 100644 --- a/python/qemu/machine/machine.py +++ b/python/qemu/machine/machine.py @@ -41,7 +41,6 @@ from typing import ( ) from qemu.qmp import ( # pylint: disable=import-error - QEMUMonitorProtocol, QMPMessage, QMPReturnValue, SocketAddrT, @@ -50,6 +49,12 @@ from qemu.qmp import ( # pylint: disable=import-error from . import console_socket +if os.environ.get('QEMU_PYTHON_LEGACY_QMP'): + from qemu.qmp import QEMUMonitorProtocol +else: + from qemu.aqmp.legacy import QEMUMonitorProtocol + + LOG = logging.getLogger(__name__) @@ -98,7 +103,6 @@ class QEMUMachine: name: Optional[str] = None, base_temp_dir: str = "/var/tmp", monitor_address: Optional[SocketAddrT] = None, - socket_scm_helper: Optional[str] = None, sock_dir: Optional[str] = None, drain_console: bool = False, console_log: Optional[str] = None, @@ -113,7 +117,6 @@ class QEMUMachine: @param name: prefix for socket and log file names (default: qemu-PID) @param base_temp_dir: default location where temp files are created @param monitor_address: address for QMP monitor - @param socket_scm_helper: helper program, required for send_fd_scm() @param sock_dir: where to create socket (defaults to base_temp_dir) @param drain_console: (optional) True to drain console socket to buffer @param console_log: (optional) path to console log file @@ -134,7 +137,6 @@ class QEMUMachine: self._base_temp_dir = base_temp_dir self._sock_dir = sock_dir or self._base_temp_dir self._log_dir = log_dir - self._socket_scm_helper = socket_scm_helper if monitor_address is not None: self._monitor_address = monitor_address @@ -173,6 +175,7 @@ class QEMUMachine: self._console_socket: Optional[socket.socket] = None self._remove_files: List[str] = [] self._user_killed = False + self._quit_issued = False def __enter__(self: _T) -> _T: return self @@ -213,48 +216,22 @@ class QEMUMachine: def send_fd_scm(self, fd: Optional[int] = None, file_path: Optional[str] = None) -> int: """ - Send an fd or file_path to socket_scm_helper. + Send an fd or file_path to the remote via SCM_RIGHTS. - Exactly one of fd and file_path must be given. - If it is file_path, the helper will open that file and pass its own fd. + Exactly one of fd and file_path must be given. If it is + file_path, the file will be opened read-only and the new file + descriptor will be sent to the remote. """ - # In iotest.py, the qmp should always use unix socket. 
- assert self._qmp.is_scm_available() - if self._socket_scm_helper is None: - raise QEMUMachineError("No path to socket_scm_helper set") - if not os.path.exists(self._socket_scm_helper): - raise QEMUMachineError("%s does not exist" % - self._socket_scm_helper) - - # This did not exist before 3.4, but since then it is - # mandatory for our purpose - if hasattr(os, 'set_inheritable'): - os.set_inheritable(self._qmp.get_sock_fd(), True) - if fd is not None: - os.set_inheritable(fd, True) - - fd_param = ["%s" % self._socket_scm_helper, - "%d" % self._qmp.get_sock_fd()] - if file_path is not None: assert fd is None - fd_param.append(file_path) + with open(file_path, "rb") as passfile: + fd = passfile.fileno() + self._qmp.send_fd_scm(fd) else: assert fd is not None - fd_param.append(str(fd)) + self._qmp.send_fd_scm(fd) - proc = subprocess.run( - fd_param, - stdin=subprocess.DEVNULL, - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - check=False, - close_fds=False, - ) - if proc.stdout: - LOG.debug(proc.stdout) - - return proc.returncode + return 0 @staticmethod def _remove_if_exists(path: str) -> None: @@ -370,9 +347,15 @@ class QEMUMachine: # Comprehensive reset for the failed launch case: self._early_cleanup() - if self._qmp_connection: - self._qmp.close() - self._qmp_connection = None + try: + self._close_qmp_connection() + except Exception as err: # pylint: disable=broad-except + LOG.warning( + "Exception closing QMP connection: %s", + str(err) if str(err) else type(err).__name__ + ) + finally: + assert self._qmp_connection is None self._close_qemu_log_file() @@ -397,6 +380,7 @@ class QEMUMachine: command = '' LOG.warning(msg, -int(exitcode), command) + self._quit_issued = False self._user_killed = False self._launched = False @@ -447,6 +431,31 @@ class QEMUMachine: close_fds=False) self._post_launch() + def _close_qmp_connection(self) -> None: + """ + Close the underlying QMP connection, if any. + + Dutifully report errors that occurred while closing, but assume + that any error encountered indicates an abnormal termination + process and not a failure to close. + """ + if self._qmp_connection is None: + return + + try: + self._qmp.close() + except EOFError: + # EOF can occur as an Exception here when using the Async + # QMP backend. It indicates that the server closed the + # stream. If we successfully issued 'quit' at any point, + # then this was expected. If the remote went away without + # our permission, it's worth reporting that as an abnormal + # shutdown case. + if not (self._user_killed or self._quit_issued): + raise + finally: + self._qmp_connection = None + def _early_cleanup(self) -> None: """ Perform any cleanup that needs to happen before the VM exits. @@ -472,15 +481,13 @@ class QEMUMachine: self._subp.kill() self._subp.wait(timeout=60) - def _soft_shutdown(self, timeout: Optional[int], - has_quit: bool = False) -> None: + def _soft_shutdown(self, timeout: Optional[int]) -> None: """ Perform early cleanup, attempt to gracefully shut down the VM, and wait for it to terminate. :param timeout: Timeout in seconds for graceful shutdown. A value of None is an infinite wait. 
- :param has_quit: When True, don't attempt to issue 'quit' QMP command :raise ConnectionReset: On QMP communication errors :raise subprocess.TimeoutExpired: When timeout is exceeded waiting for @@ -489,21 +496,24 @@ class QEMUMachine: self._early_cleanup() if self._qmp_connection: - if not has_quit: - # Might raise ConnectionReset - self._qmp.cmd('quit') + try: + if not self._quit_issued: + # May raise ExecInterruptedError or StateError if the + # connection dies or has *already* died. + self.qmp('quit') + finally: + # Regardless, we want to quiesce the connection. + self._close_qmp_connection() # May raise subprocess.TimeoutExpired self._subp.wait(timeout=timeout) - def _do_shutdown(self, timeout: Optional[int], - has_quit: bool = False) -> None: + def _do_shutdown(self, timeout: Optional[int]) -> None: """ Attempt to shutdown the VM gracefully; fallback to a hard shutdown. :param timeout: Timeout in seconds for graceful shutdown. A value of None is an infinite wait. - :param has_quit: When True, don't attempt to issue 'quit' QMP command :raise AbnormalShutdown: When the VM could not be shut down gracefully. The inner exception will likely be ConnectionReset or @@ -511,13 +521,13 @@ class QEMUMachine: may result in its own exceptions, likely subprocess.TimeoutExpired. """ try: - self._soft_shutdown(timeout, has_quit) + self._soft_shutdown(timeout) except Exception as exc: self._hard_shutdown() raise AbnormalShutdown("Could not perform graceful shutdown") \ from exc - def shutdown(self, has_quit: bool = False, + def shutdown(self, hard: bool = False, timeout: Optional[int] = 30) -> None: """ @@ -527,7 +537,6 @@ class QEMUMachine: If the VM has not yet been launched, or shutdown(), wait(), or kill() have already been called, this method does nothing. - :param has_quit: When true, do not attempt to issue 'quit' QMP command. :param hard: When true, do not attempt graceful shutdown, and suppress the SIGKILL warning log message. :param timeout: Optional timeout in seconds for graceful shutdown. @@ -541,7 +550,7 @@ class QEMUMachine: self._user_killed = True self._hard_shutdown() else: - self._do_shutdown(timeout, has_quit) + self._do_shutdown(timeout) finally: self._post_shutdown() @@ -558,7 +567,8 @@ class QEMUMachine: :param timeout: Optional timeout in seconds. Default 30 seconds. A value of `None` is an infinite wait. """ - self.shutdown(has_quit=True, timeout=timeout) + self._quit_issued = True + self.shutdown(timeout=timeout) def set_qmp_monitor(self, enabled: bool = True) -> None: """ @@ -603,7 +613,10 @@ class QEMUMachine: conv_keys = True qmp_args = self._qmp_args(conv_keys, args) - return self._qmp.cmd(cmd, args=qmp_args) + ret = self._qmp.cmd(cmd, args=qmp_args) + if cmd == 'quit' and 'error' not in ret and 'return' in ret: + self._quit_issued = True + return ret def command(self, cmd: str, conv_keys: bool = True, @@ -614,7 +627,10 @@ class QEMUMachine: On failure raise an exception. 
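With the has_quit parameter gone, QEMUMachine now tracks a successful 'quit' on its own. A sketch of the resulting caller-side pattern, with a placeholder binary path:

    from qemu.machine import QEMUMachine

    with QEMUMachine('/usr/bin/qemu-system-x86_64') as vm:   # placeholder path
        vm.launch()
        vm.qmp('quit')     # a successful reply sets the internal _quit_issued flag
        vm.shutdown()      # graceful path no longer re-issues 'quit'

vm.wait() remains the one-call shorthand: it marks 'quit' as already issued and then performs the same graceful shutdown.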
""" qmp_args = self._qmp_args(conv_keys, args) - return self._qmp.command(cmd, **qmp_args) + ret = self._qmp.command(cmd, **qmp_args) + if cmd == 'quit': + self._quit_issued = True + return ret def get_qmp_event(self, wait: bool = False) -> Optional[QMPMessage]: """ @@ -631,7 +647,6 @@ class QEMUMachine: events = self._qmp.get_events(wait=wait) events.extend(self._events) del self._events[:] - self._qmp.clear_events() return events @staticmethod diff --git a/python/qemu/machine/qtest.py b/python/qemu/machine/qtest.py index 395cc8fbfe..f2f9aaa5e5 100644 --- a/python/qemu/machine/qtest.py +++ b/python/qemu/machine/qtest.py @@ -115,7 +115,6 @@ class QEMUQtestMachine(QEMUMachine): wrapper: Sequence[str] = (), name: Optional[str] = None, base_temp_dir: str = "/var/tmp", - socket_scm_helper: Optional[str] = None, sock_dir: Optional[str] = None, qmp_timer: Optional[float] = None): # pylint: disable=too-many-arguments @@ -126,7 +125,6 @@ class QEMUQtestMachine(QEMUMachine): sock_dir = base_temp_dir super().__init__(binary, args, wrapper=wrapper, name=name, base_temp_dir=base_temp_dir, - socket_scm_helper=socket_scm_helper, sock_dir=sock_dir, qmp_timer=qmp_timer) self._qtest: Optional[QEMUQtestProtocol] = None self._qtest_path = os.path.join(sock_dir, name + "-qtest.sock") diff --git a/python/qemu/qmp/README.rst b/python/qemu/qmp/README.rst index c21951491c..5bfb82535f 100644 --- a/python/qemu/qmp/README.rst +++ b/python/qemu/qmp/README.rst @@ -3,7 +3,7 @@ qemu.qmp package This package provides a library used for connecting to and communicating with QMP servers. It is used extensively by iotests, vm tests, -acceptance tests, and other utilities in the ./scripts directory. It is +avocado tests, and other utilities in the ./scripts directory. It is not a fully-fledged SDK and is subject to change at any time. See the documentation in ``__init__.py`` for more information. diff --git a/python/qemu/qmp/__init__.py b/python/qemu/qmp/__init__.py index 269516a79b..358c0971d0 100644 --- a/python/qemu/qmp/__init__.py +++ b/python/qemu/qmp/__init__.py @@ -21,6 +21,7 @@ import errno import json import logging import socket +import struct from types import TracebackType from typing import ( Any, @@ -361,7 +362,7 @@ class QEMUMonitorProtocol: def get_events(self, wait: bool = False) -> List[QMPMessage]: """ - Get a list of available QMP events. + Get a list of available QMP events and clear all pending events. @param wait (bool): block until an event is available. @param wait (float): If wait is a float, treat it as a timeout value. @@ -374,7 +375,9 @@ class QEMUMonitorProtocol: @return The list of available QMP events. """ self.__get_events(wait) - return self.__events + events = self.__events + self.__events = [] + return events def clear_events(self) -> None: """ @@ -406,18 +409,14 @@ class QEMUMonitorProtocol: raise ValueError(msg) self.__sock.settimeout(timeout) - def get_sock_fd(self) -> int: + def send_fd_scm(self, fd: int) -> None: """ - Get the socket file descriptor. + Send a file descriptor to the remote via SCM_RIGHTS. + """ + if self.__sock.family != socket.AF_UNIX: + raise RuntimeError("Can't use SCM_RIGHTS on non-AF_UNIX socket.") - @return The file descriptor number. - """ - return self.__sock.fileno() - - def is_scm_available(self) -> bool: - """ - Check if the socket allows for SCM_RIGHTS. - - @return True if SCM_RIGHTS is available, otherwise False. 
- """ - return self.__sock.family == socket.AF_UNIX + self.__sock.sendmsg( + [b' '], + [(socket.SOL_SOCKET, socket.SCM_RIGHTS, struct.pack('@i', fd))] + ) diff --git a/python/qemu/qmp/qmp_shell.py b/python/qemu/qmp/qmp_shell.py index 337acfce2d..e7d7eb18f1 100644 --- a/python/qemu/qmp/qmp_shell.py +++ b/python/qemu/qmp/qmp_shell.py @@ -381,7 +381,6 @@ class QMPShell(qmp.QEMUMonitorProtocol): if cmdline == '': for event in self.get_events(): print(event) - self.clear_events() return True return self._execute_cmd(cmdline) diff --git a/python/qemu/utils/README.rst b/python/qemu/utils/README.rst index 975fbf4d7d..d5f2da1454 100644 --- a/python/qemu/utils/README.rst +++ b/python/qemu/utils/README.rst @@ -2,6 +2,6 @@ qemu.utils package ================== This package provides miscellaneous utilities used for testing and -debugging QEMU. It is used primarily by the vm and acceptance tests. +debugging QEMU. It is used primarily by the vm and avocado tests. See the documentation in ``__init__.py`` for more information. diff --git a/python/tests/iotests-mypy.sh b/python/tests/iotests-mypy.sh new file mode 100755 index 0000000000..ee76470819 --- /dev/null +++ b/python/tests/iotests-mypy.sh @@ -0,0 +1,4 @@ +#!/bin/sh -e + +cd ../tests/qemu-iotests/ +python3 -m linters --mypy diff --git a/python/tests/iotests-pylint.sh b/python/tests/iotests-pylint.sh new file mode 100755 index 0000000000..4cae03424b --- /dev/null +++ b/python/tests/iotests-pylint.sh @@ -0,0 +1,4 @@ +#!/bin/sh -e + +cd ../tests/qemu-iotests/ +python3 -m linters --pylint diff --git a/qapi/block-core.json b/qapi/block-core.json index 623a4f4a3f..1d3dd9cb48 100644 --- a/qapi/block-core.json +++ b/qapi/block-core.json @@ -491,11 +491,11 @@ # @granularity: granularity of the dirty bitmap in bytes (since 1.4) # # @recording: true if the bitmap is recording new writes from the guest. -# Replaces `active` and `disabled` statuses. (since 4.0) +# Replaces ``active`` and ``disabled`` statuses. (since 4.0) # # @busy: true if the bitmap is in-use by some operation (NBD or jobs) # and cannot be modified via QMP or used by another operation. -# Replaces `locked` and `frozen` statuses. (since 4.0) +# Replaces ``locked`` and ``frozen`` statuses. (since 4.0) # # @persistent: true if the bitmap was stored on disk, is scheduled to be stored # on disk, or both. (since 4.0) @@ -1438,6 +1438,9 @@ # # @x-perf: Performance options. (Since 6.0) # +# Features: +# @unstable: Member @x-perf is experimental. +# # Note: @on-source-error and @on-target-error only affect background # I/O. If an error occurs during a guest write request, the device's # rerror/werror actions will be used. @@ -1452,7 +1455,9 @@ '*on-source-error': 'BlockdevOnError', '*on-target-error': 'BlockdevOnError', '*auto-finalize': 'bool', '*auto-dismiss': 'bool', - '*filter-node-name': 'str', '*x-perf': 'BackupPerf' } } + '*filter-node-name': 'str', + '*x-perf': { 'type': 'BackupPerf', + 'features': [ 'unstable' ] } } } ## # @DriveBackup: @@ -1704,6 +1709,9 @@ # The operation can be stopped before it has completed using the # block-job-cancel command. # +# Features: +# @deprecated: This command is deprecated. Use @blockdev-backup instead. +# # Returns: - nothing on success # - If @device is not a valid block device, GenericError # @@ -1719,7 +1727,7 @@ # ## { 'command': 'drive-backup', 'boxed': true, - 'data': 'DriveBackup' } + 'data': 'DriveBackup', 'features': ['deprecated'] } ## # @blockdev-backup: @@ -1916,9 +1924,13 @@ # # Get the block graph. 
# +# Features: +# @unstable: This command is meant for debugging. +# # Since: 4.0 ## -{ 'command': 'x-debug-query-block-graph', 'returns': 'XDbgBlockGraph' } +{ 'command': 'x-debug-query-block-graph', 'returns': 'XDbgBlockGraph', + 'features': [ 'unstable' ] } ## # @drive-mirror: @@ -2257,6 +2269,9 @@ # # Get bitmap SHA256. # +# Features: +# @unstable: This command is meant for debugging. +# # Returns: - BlockDirtyBitmapSha256 on success # - If @node is not a valid block device, DeviceNotFound # - If @name is not found or if hashing has failed, GenericError with an @@ -2265,7 +2280,8 @@ # Since: 2.10 ## { 'command': 'x-debug-block-dirty-bitmap-sha256', - 'data': 'BlockDirtyBitmap', 'returns': 'BlockDirtyBitmapSha256' } + 'data': 'BlockDirtyBitmap', 'returns': 'BlockDirtyBitmapSha256', + 'features': [ 'unstable' ] } ## # @blockdev-mirror: @@ -2495,27 +2511,57 @@ # # Properties for throttle-group objects. # -# The options starting with x- are aliases for the same key without x- in -# the @limits object. As indicated by the x- prefix, this is not a stable -# interface and may be removed or changed incompatibly in the future. Use -# @limits for a supported stable interface. -# # @limits: limits to apply for this throttle group # +# Features: +# @unstable: All members starting with x- are aliases for the same key +# without x- in the @limits object. This is not a stable +# interface and may be removed or changed incompatibly in +# the future. Use @limits for a supported stable +# interface. +# # Since: 2.11 ## { 'struct': 'ThrottleGroupProperties', 'data': { '*limits': 'ThrottleLimits', - '*x-iops-total' : 'int', '*x-iops-total-max' : 'int', - '*x-iops-total-max-length' : 'int', '*x-iops-read' : 'int', - '*x-iops-read-max' : 'int', '*x-iops-read-max-length' : 'int', - '*x-iops-write' : 'int', '*x-iops-write-max' : 'int', - '*x-iops-write-max-length' : 'int', '*x-bps-total' : 'int', - '*x-bps-total-max' : 'int', '*x-bps-total-max-length' : 'int', - '*x-bps-read' : 'int', '*x-bps-read-max' : 'int', - '*x-bps-read-max-length' : 'int', '*x-bps-write' : 'int', - '*x-bps-write-max' : 'int', '*x-bps-write-max-length' : 'int', - '*x-iops-size' : 'int' } } + '*x-iops-total': { 'type': 'int', + 'features': [ 'unstable' ] }, + '*x-iops-total-max': { 'type': 'int', + 'features': [ 'unstable' ] }, + '*x-iops-total-max-length': { 'type': 'int', + 'features': [ 'unstable' ] }, + '*x-iops-read': { 'type': 'int', + 'features': [ 'unstable' ] }, + '*x-iops-read-max': { 'type': 'int', + 'features': [ 'unstable' ] }, + '*x-iops-read-max-length': { 'type': 'int', + 'features': [ 'unstable' ] }, + '*x-iops-write': { 'type': 'int', + 'features': [ 'unstable' ] }, + '*x-iops-write-max': { 'type': 'int', + 'features': [ 'unstable' ] }, + '*x-iops-write-max-length': { 'type': 'int', + 'features': [ 'unstable' ] }, + '*x-bps-total': { 'type': 'int', + 'features': [ 'unstable' ] }, + '*x-bps-total-max': { 'type': 'int', + 'features': [ 'unstable' ] }, + '*x-bps-total-max-length': { 'type': 'int', + 'features': [ 'unstable' ] }, + '*x-bps-read': { 'type': 'int', + 'features': [ 'unstable' ] }, + '*x-bps-read-max': { 'type': 'int', + 'features': [ 'unstable' ] }, + '*x-bps-read-max-length': { 'type': 'int', + 'features': [ 'unstable' ] }, + '*x-bps-write': { 'type': 'int', + 'features': [ 'unstable' ] }, + '*x-bps-write-max': { 'type': 'int', + 'features': [ 'unstable' ] }, + '*x-bps-write-max-length': { 'type': 'int', + 'features': [ 'unstable' ] }, + '*x-iops-size': { 'type': 'int', + 'features': [ 'unstable' ] } } } ## # 
@block-stream: @@ -2896,6 +2942,12 @@ # for this device (default: none, forward the commands via SG_IO; # since 2.11) # @aio: AIO backend (default: threads) (since: 2.8) +# @aio-max-batch: maximum number of requests to batch together into a single +# submission in the AIO backend. The smallest value between +# this and the aio-max-batch value of the IOThread object is +# chosen. +# 0 means that the AIO backend will handle it automatically. +# (default: 0, since 6.2) # @locking: whether to enable file locking. If set to 'auto', only enable # when Open File Descriptor (OFD) locking API is available # (default: auto, since 2.10) @@ -2916,6 +2968,7 @@ # read-only when the last writer is detached. This # allows giving QEMU write permissions only on demand # when an operation actually needs write access. +# @unstable: Member x-check-cache-dropped is meant for debugging. # # Since: 2.9 ## @@ -2924,9 +2977,11 @@ '*pr-manager': 'str', '*locking': 'OnOffAuto', '*aio': 'BlockdevAioOptions', + '*aio-max-batch': 'int', '*drop-cache': {'type': 'bool', 'if': 'CONFIG_LINUX'}, - '*x-check-cache-dropped': 'bool' }, + '*x-check-cache-dropped': { 'type': 'bool', + 'features': [ 'unstable' ] } }, 'features': [ { 'name': 'dynamic-auto-read-only', 'if': 'CONFIG_POSIX' } ] } @@ -3132,6 +3187,7 @@ ## # @BlockdevQcow2EncryptionFormat: +# # @aes: AES-CBC with plain64 initialization vectors # # Since: 2.10 @@ -4040,13 +4096,16 @@ # future requests before a successful reconnect will # immediately fail. Default 0 (Since 4.2) # +# Features: +# @unstable: Member @x-dirty-bitmap is experimental. +# # Since: 2.9 ## { 'struct': 'BlockdevOptionsNbd', 'data': { 'server': 'SocketAddress', '*export': 'str', '*tls-creds': 'str', - '*x-dirty-bitmap': 'str', + '*x-dirty-bitmap': { 'type': 'str', 'features': [ 'unstable' ] }, '*reconnect-delay': 'uint32' } } ## @@ -4642,6 +4701,8 @@ # @adapter-type: The adapter type used to fill in the descriptor. Default: ide. # @hwversion: Hardware version. The meaningful options are "4" or "6". # Default: "4". +# @toolsversion: VMware guest tools version. +# Default: "2147483647" (Since 6.2) # @zeroed-grain: Whether to enable zeroed-grain feature for sparse subformats. # Default: false. # @@ -4655,6 +4716,7 @@ '*backing-file': 'str', '*adapter-type': 'BlockdevVmdkAdapterType', '*hwversion': 'str', + '*toolsversion': 'str', '*zeroed-grain': 'bool' } } @@ -4864,13 +4926,17 @@ # and replacement of an active keyslot # (possible loss of data if IO error happens) # +# Features: +# @unstable: This command is experimental. +# # Since: 5.1 ## { 'command': 'x-blockdev-amend', 'data': { 'job-id': 'str', 'node-name': 'str', 'options': 'BlockdevAmendOptions', - '*force': 'bool' } } + '*force': 'bool' }, + 'features': [ 'unstable' ] } ## # @BlockErrorAction: @@ -5241,16 +5307,18 @@ # # @node: the name of the node that will be added. # -# Note: this command is experimental, and its API is not stable. It -# does not support all kinds of operations, all kinds of children, nor -# all block drivers. +# Features: +# @unstable: This command is experimental, and its API is not stable. It +# does not support all kinds of operations, all kinds of +# children, nor all block drivers. # -# FIXME Removing children from a quorum node means introducing gaps in the -# child indices. This cannot be represented in the 'children' list of -# BlockdevOptionsQuorum, as returned by .bdrv_refresh_filename(). +# FIXME Removing children from a quorum node means introducing +# gaps in the child indices. 
This cannot be represented in the +# 'children' list of BlockdevOptionsQuorum, as returned by +# .bdrv_refresh_filename(). # -# Warning: The data in a new quorum child MUST be consistent with that of -# the rest of the array. +# Warning: The data in a new quorum child MUST be consistent +# with that of the rest of the array. # # Since: 2.7 # @@ -5279,7 +5347,8 @@ { 'command': 'x-blockdev-change', 'data' : { 'parent': 'str', '*child': 'str', - '*node': 'str' } } + '*node': 'str' }, + 'features': [ 'unstable' ] } ## # @x-blockdev-set-iothread: @@ -5296,8 +5365,9 @@ # @force: true if the node and its children should be moved when a BlockBackend # is already attached # -# Note: this command is experimental and intended for test cases that need -# control over IOThreads only. +# Features: +# @unstable: This command is experimental and intended for test cases that +# need control over IOThreads only. # # Since: 2.12 # @@ -5319,7 +5389,8 @@ { 'command': 'x-blockdev-set-iothread', 'data' : { 'node-name': 'str', 'iothread': 'StrOrNull', - '*force': 'bool' } } + '*force': 'bool' }, + 'features': [ 'unstable' ] } ## # @QuorumOpType: diff --git a/qapi/common.json b/qapi/common.json index 7c976296f0..412cc4f5ae 100644 --- a/qapi/common.json +++ b/qapi/common.json @@ -197,3 +197,14 @@ { 'enum': 'GrabToggleKeys', 'data': [ 'ctrl-ctrl', 'alt-alt', 'shift-shift','meta-meta', 'scrolllock', 'ctrl-scrolllock' ] } + +## +# @HumanReadableText: +# +# @human-readable-text: Formatted output intended for humans. +# +# Since: 6.2 +# +## +{ 'struct': 'HumanReadableText', + 'data': { 'human-readable-text': 'str' } } diff --git a/qapi/compat.json b/qapi/compat.json index ae3afc22df..dd7261ae2a 100644 --- a/qapi/compat.json +++ b/qapi/compat.json @@ -42,11 +42,20 @@ # with feature 'deprecated'. We may want to extend it to cover # semantic aspects, CLI, and experimental features. # +# Limitation: deprecated-output policy @hide is not implemented for +# enumeration values. They behave the same as with policy @accept. +# # @deprecated-input: how to handle deprecated input (default 'accept') # @deprecated-output: how to handle deprecated output (default 'accept') +# @unstable-input: how to handle unstable input (default 'accept') +# (since 6.2) +# @unstable-output: how to handle unstable output (default 'accept') +# (since 6.2) # # Since: 6.0 ## { 'struct': 'CompatPolicy', 'data': { '*deprecated-input': 'CompatPolicyInput', - '*deprecated-output': 'CompatPolicyOutput' } } + '*deprecated-output': 'CompatPolicyOutput', + '*unstable-input': 'CompatPolicyInput', + '*unstable-output': 'CompatPolicyOutput' } } diff --git a/qapi/introspect.json b/qapi/introspect.json index 39bd303778..183148b2e9 100644 --- a/qapi/introspect.json +++ b/qapi/introspect.json @@ -142,14 +142,38 @@ # # Additional SchemaInfo members for meta-type 'enum'. # -# @values: the enumeration type's values, in no particular order. +# @members: the enum type's members, in no particular order +# (since 6.2). +# +# @values: the enumeration type's member names, in no particular order. +# Redundant with @members. Just for backward compatibility. +# +# Features: +# @deprecated: Member @values is deprecated. Use @members instead. # # Values of this type are JSON string on the wire. # # Since: 2.5 ## { 'struct': 'SchemaInfoEnum', - 'data': { 'values': ['str'] } } + 'data': { 'members': [ 'SchemaInfoEnumMember' ], + 'values': { 'type': [ 'str' ], + 'features': [ 'deprecated' ] } } } + +## +# @SchemaInfoEnumMember: +# +# An object member. 
+# +# @name: the member's name, as defined in the QAPI schema. +# +# @features: names of features associated with the member, in no +# particular order. +# +# Since: 6.2 +## +{ 'struct': 'SchemaInfoEnumMember', + 'data': { 'name': 'str', '*features': [ 'str' ] } } ## # @SchemaInfoArray: diff --git a/qapi/machine.json b/qapi/machine.json index 32d47f4e35..067e3f5378 100644 --- a/qapi/machine.json +++ b/qapi/machine.json @@ -1194,13 +1194,36 @@ } } +## +# @SgxEPCDeviceInfo: +# +# Sgx EPC state information +# +# @id: device's ID +# +# @memaddr: physical address in memory, where device is mapped +# +# @size: size of memory that the device provides +# +# @memdev: memory backend linked with device +# +# Since: 6.2 +## +{ 'struct': 'SgxEPCDeviceInfo', + 'data': { '*id': 'str', + 'memaddr': 'size', + 'size': 'size', + 'memdev': 'str' + } +} + ## # @MemoryDeviceInfoKind: # # Since: 2.1 ## { 'enum': 'MemoryDeviceInfoKind', - 'data': [ 'dimm', 'nvdimm', 'virtio-pmem', 'virtio-mem' ] } + 'data': [ 'dimm', 'nvdimm', 'virtio-pmem', 'virtio-mem', 'sgx-epc' ] } ## # @PCDIMMDeviceInfoWrapper: @@ -1226,13 +1249,21 @@ { 'struct': 'VirtioMEMDeviceInfoWrapper', 'data': { 'data': 'VirtioMEMDeviceInfo' } } +## +# @SgxEPCDeviceInfoWrapper: +# +# Since: 6.2 +## +{ 'struct': 'SgxEPCDeviceInfoWrapper', + 'data': { 'data': 'SgxEPCDeviceInfo' } } + ## # @MemoryDeviceInfo: # # Union containing information about a memory device # # nvdimm is included since 2.12. virtio-pmem is included since 4.1. -# virtio-mem is included since 5.1. +# virtio-mem is included since 5.1. sgx-epc is included since 6.2. # # Since: 2.1 ## @@ -1242,10 +1273,36 @@ 'data': { 'dimm': 'PCDIMMDeviceInfoWrapper', 'nvdimm': 'PCDIMMDeviceInfoWrapper', 'virtio-pmem': 'VirtioPMEMDeviceInfoWrapper', - 'virtio-mem': 'VirtioMEMDeviceInfoWrapper' + 'virtio-mem': 'VirtioMEMDeviceInfoWrapper', + 'sgx-epc': 'SgxEPCDeviceInfoWrapper' } } +## +# @SgxEPC: +# +# Sgx EPC cmdline information +# +# @memdev: memory backend linked with device +# +# Since: 6.2 +## +{ 'struct': 'SgxEPC', + 'data': { 'memdev': 'str' } } + +## +# @SgxEPCProperties: +# +# SGX properties of machine types. +# +# @sgx-epc: list of ids of memory-backend-epc objects. +# +# Since: 6.2 +## +{ 'struct': 'SgxEPCProperties', + 'data': { 'sgx-epc': ['SgxEPC'] } +} + ## # @query-memory-devices: # @@ -1279,8 +1336,11 @@ # action). # # @id: device's ID +# # @size: the new size of memory that the device provides # +# @qom-path: path to the device object in the QOM tree (since 6.2) +# # Note: this event is rate-limited. # # Since: 5.1 @@ -1293,7 +1353,7 @@ # ## { 'event': 'MEMORY_DEVICE_SIZE_CHANGE', - 'data': { '*id': 'str', 'size': 'size' } } + 'data': { '*id': 'str', 'size': 'size', 'qom-path' : 'str'} } ## @@ -1305,6 +1365,10 @@ # # @msg: Informative message # +# Features: +# @deprecated: This event is deprecated. Use @DEVICE_UNPLUG_GUEST_ERROR +# instead. 
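With the sgx-epc branch added to MemoryDeviceInfo, query-memory-devices can now report EPC sections alongside DIMMs and virtio-mem devices. An illustrative poll over the sync wrapper (socket path hypothetical):

    from qemu.aqmp.legacy import QEMUMonitorProtocol

    qmp = QEMUMonitorProtocol('/tmp/qmp.sock')    # hypothetical socket path
    qmp.connect()
    for dev in qmp.command('query-memory-devices'):
        if dev['type'] == 'sgx-epc':
            epc = dev['data']                     # SgxEPCDeviceInfo
            print(epc['memdev'], hex(epc['memaddr']), epc['size'])
    qmp.close()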
+# # Since: 2.4 # # Example: @@ -1317,7 +1381,8 @@ # ## { 'event': 'MEM_UNPLUG_ERROR', - 'data': { 'device': 'str', 'msg': 'str' } } + 'data': { 'device': 'str', 'msg': 'str' }, + 'features': ['deprecated'] } ## # @SMPConfiguration: @@ -1331,7 +1396,7 @@ # # @dies: number of dies per socket in the CPU topology # -# @cores: number of cores per thread in the CPU topology +# @cores: number of cores per die in the CPU topology # # @threads: number of threads per core in the CPU topology # @@ -1346,3 +1411,149 @@ '*cores': 'int', '*threads': 'int', '*maxcpus': 'int' } } + +## +# @x-query-irq: +# +# Query interrupt statistics +# +# Features: +# @unstable: This command is meant for debugging. +# +# Returns: interrupt statistics +# +# Since: 6.2 +## +{ 'command': 'x-query-irq', + 'returns': 'HumanReadableText', + 'features': [ 'unstable' ] } + +## +# @x-query-jit: +# +# Query TCG compiler statistics +# +# Features: +# @unstable: This command is meant for debugging. +# +# Returns: TCG compiler statistics +# +# Since: 6.2 +## +{ 'command': 'x-query-jit', + 'returns': 'HumanReadableText', + 'if': 'CONFIG_TCG', + 'features': [ 'unstable' ] } + +## +# @x-query-numa: +# +# Query NUMA topology information +# +# Features: +# @unstable: This command is meant for debugging. +# +# Returns: topology information +# +# Since: 6.2 +## +{ 'command': 'x-query-numa', + 'returns': 'HumanReadableText', + 'features': [ 'unstable' ] } + +## +# @x-query-opcount: +# +# Query TCG opcode counters +# +# Features: +# @unstable: This command is meant for debugging. +# +# Returns: TCG opcode counters +# +# Since: 6.2 +## +{ 'command': 'x-query-opcount', + 'returns': 'HumanReadableText', + 'if': 'CONFIG_TCG', + 'features': [ 'unstable' ] } + +## +# @x-query-profile: +# +# Query TCG profiling information +# +# Features: +# @unstable: This command is meant for debugging. +# +# Returns: profile information +# +# Since: 6.2 +## +{ 'command': 'x-query-profile', + 'returns': 'HumanReadableText', + 'features': [ 'unstable' ] } + +## +# @x-query-ramblock: +# +# Query system ramblock information +# +# Features: +# @unstable: This command is meant for debugging. +# +# Returns: system ramblock information +# +# Since: 6.2 +## +{ 'command': 'x-query-ramblock', + 'returns': 'HumanReadableText', + 'features': [ 'unstable' ] } + +## +# @x-query-rdma: +# +# Query RDMA state +# +# Features: +# @unstable: This command is meant for debugging. +# +# Returns: RDMA state +# +# Since: 6.2 +## +{ 'command': 'x-query-rdma', + 'returns': 'HumanReadableText', + 'features': [ 'unstable' ] } + +## +# @x-query-roms: +# +# Query information on the registered ROMS +# +# Features: +# @unstable: This command is meant for debugging. +# +# Returns: registered ROMs +# +# Since: 6.2 +## +{ 'command': 'x-query-roms', + 'returns': 'HumanReadableText', + 'features': [ 'unstable' ] } + +## +# @x-query-usb: +# +# Query information on the USB devices +# +# Features: +# @unstable: This command is meant for debugging. 
+# +# Returns: USB device information +# +# Since: 6.2 +## +{ 'command': 'x-query-usb', + 'returns': 'HumanReadableText', + 'features': [ 'unstable' ] } diff --git a/qapi/meson.build b/qapi/meson.build index c356a385e3..c0c49c15e4 100644 --- a/qapi/meson.build +++ b/qapi/meson.build @@ -10,6 +10,9 @@ util_ss.add(files( 'string-input-visitor.c', 'string-output-visitor.c', )) +if have_system + util_ss.add(files('qapi-type-helpers.c')) +endif if have_system or have_tools util_ss.add(files( 'qmp-dispatch.c', diff --git a/qapi/migration.json b/qapi/migration.json index 88f07baedd..bbfd48cf0b 100644 --- a/qapi/migration.json +++ b/qapi/migration.json @@ -452,14 +452,20 @@ # procedure starts. The VM RAM is saved with running VM. # (since 6.0) # +# Features: +# @unstable: Members @x-colo and @x-ignore-shared are experimental. +# # Since: 1.2 ## { 'enum': 'MigrationCapability', 'data': ['xbzrle', 'rdma-pin-all', 'auto-converge', 'zero-blocks', - 'compress', 'events', 'postcopy-ram', 'x-colo', 'release-ram', + 'compress', 'events', 'postcopy-ram', + { 'name': 'x-colo', 'features': [ 'unstable' ] }, + 'release-ram', 'block', 'return-path', 'pause-before-switchover', 'multifd', 'dirty-bitmaps', 'postcopy-blocktime', 'late-block-activate', - 'x-ignore-shared', 'validate-uuid', 'background-snapshot'] } + { 'name': 'x-ignore-shared', 'features': [ 'unstable' ] }, + 'validate-uuid', 'background-snapshot'] } ## # @MigrationCapabilityStatus: @@ -743,6 +749,9 @@ # block device name if there is one, and to their node name # otherwise. (Since 5.2) # +# Features: +# @unstable: Member @x-checkpoint-delay is experimental. +# # Since: 2.4 ## { 'enum': 'MigrationParameter', @@ -753,7 +762,9 @@ 'cpu-throttle-initial', 'cpu-throttle-increment', 'cpu-throttle-tailslow', 'tls-creds', 'tls-hostname', 'tls-authz', 'max-bandwidth', - 'downtime-limit', 'x-checkpoint-delay', 'block-incremental', + 'downtime-limit', + { 'name': 'x-checkpoint-delay', 'features': [ 'unstable' ] }, + 'block-incremental', 'multifd-channels', 'xbzrle-cache-size', 'max-postcopy-bandwidth', 'max-cpu-throttle', 'multifd-compression', @@ -903,6 +914,9 @@ # block device name if there is one, and to their node name # otherwise. (Since 5.2) # +# Features: +# @unstable: Member @x-checkpoint-delay is experimental. +# # Since: 2.4 ## # TODO either fuse back into MigrationParameters, or make @@ -925,7 +939,8 @@ '*tls-authz': 'StrOrNull', '*max-bandwidth': 'size', '*downtime-limit': 'uint64', - '*x-checkpoint-delay': 'uint32', + '*x-checkpoint-delay': { 'type': 'uint32', + 'features': [ 'unstable' ] }, '*block-incremental': 'bool', '*multifd-channels': 'uint8', '*xbzrle-cache-size': 'size', @@ -1099,6 +1114,9 @@ # block device name if there is one, and to their node name # otherwise. (Since 5.2) # +# Features: +# @unstable: Member @x-checkpoint-delay is experimental. +# # Since: 2.4 ## { 'struct': 'MigrationParameters', @@ -1119,7 +1137,8 @@ '*tls-authz': 'str', '*max-bandwidth': 'size', '*downtime-limit': 'uint64', - '*x-checkpoint-delay': 'uint32', + '*x-checkpoint-delay': { 'type': 'uint32', + 'features': [ 'unstable' ] }, '*block-incremental': 'bool', '*multifd-channels': 'uint8', '*xbzrle-cache-size': 'size', @@ -1351,6 +1370,9 @@ # If sent to the Secondary, the Secondary side will run failover work, # then takes over server operation to become the service VM. # +# Features: +# @unstable: This command is experimental. 
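Each of the x-query-* commands added to qapi/machine.json above returns a HumanReadableText object rather than structured data, so a client just prints its single member. A sketch using the sync wrapper (socket path hypothetical; these commands are unstable and intended for debugging only):

    from qemu.aqmp.legacy import QEMUMonitorProtocol

    qmp = QEMUMonitorProtocol('/tmp/qmp.sock')    # hypothetical socket path
    qmp.connect()
    info = qmp.command('x-query-roms')            # any of the x-query-* commands
    print(info['human-readable-text'])
    qmp.close()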
+# # Since: 2.8 # # Example: @@ -1359,7 +1381,8 @@ # <- { "return": {} } # ## -{ 'command': 'x-colo-lost-heartbeat' } +{ 'command': 'x-colo-lost-heartbeat', + 'features': [ 'unstable' ] } ## # @migrate_cancel: @@ -1708,6 +1731,21 @@ { 'event': 'UNPLUG_PRIMARY', 'data': { 'device-id': 'str' } } +## +# @DirtyRateVcpu: +# +# Dirty rate of vcpu. +# +# @id: vcpu index. +# +# @dirty-rate: dirty rate. +# +# Since: 6.2 +# +## +{ 'struct': 'DirtyRateVcpu', + 'data': { 'id': 'int', 'dirty-rate': 'int64' } } + ## # @DirtyRateStatus: # @@ -1725,6 +1763,23 @@ { 'enum': 'DirtyRateStatus', 'data': [ 'unstarted', 'measuring', 'measured'] } +## +# @DirtyRateMeasureMode: +# +# An enumeration of mode of measuring dirtyrate. +# +# @page-sampling: calculate dirtyrate by sampling pages. +# +# @dirty-ring: calculate dirtyrate by dirty ring. +# +# @dirty-bitmap: calculate dirtyrate by dirty bitmap. +# +# Since: 6.2 +# +## +{ 'enum': 'DirtyRateMeasureMode', + 'data': ['page-sampling', 'dirty-ring', 'dirty-bitmap'] } + ## # @DirtyRateInfo: # @@ -1743,6 +1798,12 @@ # @sample-pages: page count per GB for sample dirty pages # the default value is 512 (since 6.1) # +# @mode: mode containing method of calculate dirtyrate includes +# 'page-sampling' and 'dirty-ring' (Since 6.2) +# +# @vcpu-dirty-rate: dirtyrate for each vcpu if dirty-ring +# mode specified (Since 6.2) +# # Since: 5.2 # ## @@ -1751,7 +1812,9 @@ 'status': 'DirtyRateStatus', 'start-time': 'int64', 'calc-time': 'int64', - 'sample-pages': 'uint64'} } + 'sample-pages': 'uint64', + 'mode': 'DirtyRateMeasureMode', + '*vcpu-dirty-rate': [ 'DirtyRateVcpu' ] } } ## # @calc-dirty-rate: @@ -1763,6 +1826,9 @@ # @sample-pages: page count per GB for sample dirty pages # the default value is 512 (since 6.1) # +# @mode: mechanism of calculating dirtyrate includes +# 'page-sampling' and 'dirty-ring' (Since 6.1) +# # Since: 5.2 # # Example: @@ -1771,7 +1837,8 @@ # ## { 'command': 'calc-dirty-rate', 'data': {'calc-time': 'int64', - '*sample-pages': 'int'} } + '*sample-pages': 'int', + '*mode': 'DirtyRateMeasureMode'} } ## # @query-dirty-rate: diff --git a/qapi/misc-target.json b/qapi/misc-target.json index 3b05ad3dbf..5aa2b95b7d 100644 --- a/qapi/misc-target.json +++ b/qapi/misc-target.json @@ -229,6 +229,46 @@ 'data': { 'packet-header': 'str', 'secret': 'str', '*gpa': 'uint64' }, 'if': 'TARGET_I386' } +## +# @SevAttestationReport: +# +# The struct describes attestation report for a Secure Encrypted +# Virtualization feature. +# +# @data: guest attestation report (base64 encoded) +# +# +# Since: 6.1 +## +{ 'struct': 'SevAttestationReport', + 'data': { 'data': 'str'}, + 'if': 'TARGET_I386' } + +## +# @query-sev-attestation-report: +# +# This command is used to get the SEV attestation report, and is +# supported on AMD X86 platforms only. +# +# @mnonce: a random 16 bytes value encoded in base64 (it will be +# included in report) +# +# Returns: SevAttestationReport objects. +# +# Since: 6.1 +# +# Example: +# +# -> { "execute" : "query-sev-attestation-report", +# "arguments": { "mnonce": "aaaaaaa" } } +# <- { "return" : { "data": "aaaaaaaabbbddddd"} } +# +## +{ 'command': 'query-sev-attestation-report', + 'data': { 'mnonce': 'str' }, + 'returns': 'SevAttestationReport', + 'if': 'TARGET_I386' } + ## # @dump-skeys: # @@ -298,38 +338,62 @@ ## -# @SevAttestationReport: +# @SGXInfo: # -# The struct describes attestation report for a Secure Encrypted Virtualization -# feature. 
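With DirtyRateMeasureMode in place, a measurement request can name its mechanism explicitly and, in dirty-ring mode, report per-vCPU rates. A QMP sketch over the sync wrapper (socket path and values are illustrative; the dirty-ring and dirty-bitmap modes depend on how the VM was started):

    import time
    from qemu.aqmp.legacy import QEMUMonitorProtocol

    qmp = QEMUMonitorProtocol('/tmp/qmp.sock')    # hypothetical socket path
    qmp.connect()
    qmp.cmd('calc-dirty-rate', args={'calc-time': 1, 'mode': 'page-sampling',
                                     'sample-pages': 512})
    time.sleep(2)                                 # let the 1-second measurement finish
    print(qmp.command('query-dirty-rate'))        # DirtyRateInfo, including 'mode'
    qmp.close()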
+# Information about intel Safe Guard eXtension (SGX) support # -# @data: guest attestation report (base64 encoded) +# @sgx: true if SGX is supported # +# @sgx1: true if SGX1 is supported # -# Since: 6.1 +# @sgx2: true if SGX2 is supported +# +# @flc: true if FLC is supported +# +# @section-size: The EPC section size for guest +# +# Since: 6.2 ## -{ 'struct': 'SevAttestationReport', - 'data': { 'data': 'str'}, - 'if': 'TARGET_I386' } +{ 'struct': 'SGXInfo', + 'data': { 'sgx': 'bool', + 'sgx1': 'bool', + 'sgx2': 'bool', + 'flc': 'bool', + 'section-size': 'uint64'}, + 'if': 'TARGET_I386' } ## -# @query-sev-attestation-report: +# @query-sgx: # -# This command is used to get the SEV attestation report, and is supported on AMD -# X86 platforms only. +# Returns information about SGX # -# @mnonce: a random 16 bytes value encoded in base64 (it will be included in report) +# Returns: @SGXInfo # -# Returns: SevAttestationReport objects. -# -# Since: 6.1 +# Since: 6.2 # # Example: # -# -> { "execute" : "query-sev-attestation-report", "arguments": { "mnonce": "aaaaaaa" } } -# <- { "return" : { "data": "aaaaaaaabbbddddd"} } +# -> { "execute": "query-sgx" } +# <- { "return": { "sgx": true, "sgx1" : true, "sgx2" : true, +# "flc": true, "section-size" : 0 } } # ## -{ 'command': 'query-sev-attestation-report', 'data': { 'mnonce': 'str' }, - 'returns': 'SevAttestationReport', - 'if': 'TARGET_I386' } +{ 'command': 'query-sgx', 'returns': 'SGXInfo', 'if': 'TARGET_I386' } + +## +# @query-sgx-capabilities: +# +# Returns information from host SGX capabilities +# +# Returns: @SGXInfo +# +# Since: 6.2 +# +# Example: +# +# -> { "execute": "query-sgx-capabilities" } +# <- { "return": { "sgx": true, "sgx1" : true, "sgx2" : true, +# "flc": true, "section-size" : 0 } } +# +## +{ 'command': 'query-sgx-capabilities', 'returns': 'SGXInfo', 'if': 'TARGET_I386' } diff --git a/qapi/misc.json b/qapi/misc.json index 5c2ca3b556..358548abe1 100644 --- a/qapi/misc.json +++ b/qapi/misc.json @@ -185,6 +185,9 @@ # available during the preconfig state (i.e. when the --preconfig command # line option was in use). # +# Features: +# @unstable: This command is experimental. 
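Because commands such as x-exit-preconfig now carry the unstable special feature, management software can refuse them wholesale through the new -compat policy members documented in qapi/compat.json above. A hypothetical launch sketch (binary path and policy string are illustrative):

    from qemu.machine import QEMUMachine

    vm = QEMUMachine('/usr/bin/qemu-system-x86_64',              # placeholder path
                     args=['-compat', 'unstable-input=reject'])
    vm.launch()
    resp = vm.qmp('x-query-roms')      # expected to be rejected under this policy
    print(resp.get('error', {}).get('class'))                    # e.g. CommandNotFound
    vm.shutdown()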
+# # Since 3.0 # # Returns: nothing @@ -195,7 +198,8 @@ # <- { "return": {} } # ## -{ 'command': 'x-exit-preconfig', 'allow-preconfig': true } +{ 'command': 'x-exit-preconfig', 'allow-preconfig': true, + 'features': [ 'unstable' ] } ## # @human-monitor-command: diff --git a/qapi/qapi-forward-visitor.c b/qapi/qapi-forward-visitor.c index a4b111e22a..4ea7e0bec3 100644 --- a/qapi/qapi-forward-visitor.c +++ b/qapi/qapi-forward-visitor.c @@ -246,25 +246,27 @@ static void forward_field_optional(Visitor *v, const char *name, bool *present) visit_optional(ffv->target, name, present); } -static bool forward_field_deprecated_accept(Visitor *v, const char *name, - Error **errp) +static bool forward_field_policy_reject(Visitor *v, const char *name, + unsigned special_features, + Error **errp) { ForwardFieldVisitor *ffv = to_ffv(v); if (!forward_field_translate_name(ffv, &name, errp)) { - return false; + return true; } - return visit_deprecated_accept(ffv->target, name, errp); + return visit_policy_reject(ffv->target, name, special_features, errp); } -static bool forward_field_deprecated(Visitor *v, const char *name) +static bool forward_field_policy_skip(Visitor *v, const char *name, + unsigned special_features) { ForwardFieldVisitor *ffv = to_ffv(v); if (!forward_field_translate_name(ffv, &name, NULL)) { - return false; + return true; } - return visit_deprecated(ffv->target, name); + return visit_policy_skip(ffv->target, name, special_features); } static void forward_field_complete(Visitor *v, void *opaque) @@ -313,8 +315,8 @@ Visitor *visitor_forward_field(Visitor *target, const char *from, const char *to v->visitor.type_any = forward_field_type_any; v->visitor.type_null = forward_field_type_null; v->visitor.optional = forward_field_optional; - v->visitor.deprecated_accept = forward_field_deprecated_accept; - v->visitor.deprecated = forward_field_deprecated; + v->visitor.policy_reject = forward_field_policy_reject; + v->visitor.policy_skip = forward_field_policy_skip; v->visitor.complete = forward_field_complete; v->visitor.free = forward_field_free; diff --git a/qapi/qapi-type-helpers.c b/qapi/qapi-type-helpers.c new file mode 100644 index 0000000000..f76b34f647 --- /dev/null +++ b/qapi/qapi-type-helpers.c @@ -0,0 +1,23 @@ +/* + * QAPI common helper functions + * + * This file provides helper functions related to types defined + * in the QAPI schema. + * + * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. + * See the COPYING.LIB file in the top-level directory. 
+ * + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qapi/type-helpers.h" + +HumanReadableText *human_readable_text_from_str(GString *str) +{ + HumanReadableText *ret = g_new0(HumanReadableText, 1); + + ret->human_readable_text = g_steal_pointer(&str->str); + + return ret; +} diff --git a/qapi/qapi-util.c b/qapi/qapi-util.c index 3c24bb3d45..fda7044539 100644 --- a/qapi/qapi-util.c +++ b/qapi/qapi-util.c @@ -11,10 +11,53 @@ */ #include "qemu/osdep.h" +#include "qapi/compat-policy.h" #include "qapi/error.h" #include "qemu/ctype.h" #include "qapi/qmp/qerror.h" +CompatPolicy compat_policy; + +static bool compat_policy_input_ok1(const char *adjective, + CompatPolicyInput policy, + ErrorClass error_class, + const char *kind, const char *name, + Error **errp) +{ + switch (policy) { + case COMPAT_POLICY_INPUT_ACCEPT: + return true; + case COMPAT_POLICY_INPUT_REJECT: + error_set(errp, error_class, "%s %s %s disabled by policy", + adjective, kind, name); + return false; + case COMPAT_POLICY_INPUT_CRASH: + default: + abort(); + } +} + +bool compat_policy_input_ok(unsigned special_features, + const CompatPolicy *policy, + ErrorClass error_class, + const char *kind, const char *name, + Error **errp) +{ + if ((special_features & 1u << QAPI_DEPRECATED) + && !compat_policy_input_ok1("Deprecated", + policy->deprecated_input, + error_class, kind, name, errp)) { + return false; + } + if ((special_features & (1u << QAPI_UNSTABLE)) + && !compat_policy_input_ok1("Unstable", + policy->unstable_input, + error_class, kind, name, errp)) { + return false; + } + return true; +} + const char *qapi_enum_lookup(const QEnumLookup *lookup, int val) { assert(val >= 0 && val < lookup->size); diff --git a/qapi/qapi-visit-core.c b/qapi/qapi-visit-core.c index a641adec51..6c13510a2b 100644 --- a/qapi/qapi-visit-core.c +++ b/qapi/qapi-visit-core.c @@ -13,12 +13,17 @@ */ #include "qemu/osdep.h" +#include "qapi/compat-policy.h" #include "qapi/error.h" #include "qapi/qmp/qerror.h" #include "qapi/visitor.h" #include "qapi/visitor-impl.h" #include "trace.h" +/* Zero-initialization must result in default policy */ +QEMU_BUILD_BUG_ON(COMPAT_POLICY_INPUT_ACCEPT || COMPAT_POLICY_OUTPUT_ACCEPT); + + void visit_complete(Visitor *v, void *opaque) { assert(v->type != VISITOR_OUTPUT || v->complete); @@ -135,22 +140,29 @@ bool visit_optional(Visitor *v, const char *name, bool *present) return *present; } -bool visit_deprecated_accept(Visitor *v, const char *name, Error **errp) +bool visit_policy_reject(Visitor *v, const char *name, + unsigned special_features, Error **errp) { - trace_visit_deprecated_accept(v, name); - if (v->deprecated_accept) { - return v->deprecated_accept(v, name, errp); + trace_visit_policy_reject(v, name); + if (v->policy_reject) { + return v->policy_reject(v, name, special_features, errp); } - return true; + return false; } -bool visit_deprecated(Visitor *v, const char *name) +bool visit_policy_skip(Visitor *v, const char *name, + unsigned special_features) { - trace_visit_deprecated(v, name); - if (v->deprecated) { - return v->deprecated(v, name); + trace_visit_policy_skip(v, name); + if (v->policy_skip) { + return v->policy_skip(v, name, special_features); } - return true; + return false; +} + +void visit_set_policy(Visitor *v, CompatPolicy *policy) +{ + v->compat_policy = *policy; } bool visit_is_input(Visitor *v) @@ -384,7 +396,7 @@ static bool input_type_enum(Visitor *v, const char *name, int *obj, const QEnumLookup *lookup, Error **errp) { int64_t value; - char *enum_str; + g_autofree char 
*enum_str = NULL; if (!visit_type_str(v, name, &enum_str, errp)) { return false; @@ -392,12 +404,19 @@ static bool input_type_enum(Visitor *v, const char *name, int *obj, value = qapi_enum_parse(lookup, enum_str, -1, NULL); if (value < 0) { - error_setg(errp, QERR_INVALID_PARAMETER, enum_str); - g_free(enum_str); + error_setg(errp, "Parameter '%s' does not accept value '%s'", + name ? name : "null", enum_str); + return false; + } + + if (lookup->special_features + && !compat_policy_input_ok(lookup->special_features[value], + &v->compat_policy, + ERROR_CLASS_GENERIC_ERROR, + "value", enum_str, errp)) { return false; } - g_free(enum_str); *obj = value; return true; } diff --git a/qapi/qdev.json b/qapi/qdev.json index b83178220b..69656b14df 100644 --- a/qapi/qdev.json +++ b/qapi/qdev.json @@ -32,17 +32,23 @@ ## # @device_add: # +# Add a device. +# # @driver: the name of the new device's driver # # @bus: the device's parent bus (device tree path) # # @id: the device's ID, must be unique # -# Additional arguments depend on the type. -# -# Add a device. +# Features: +# @json-cli: If present, the "-device" command line option supports JSON +# syntax with a structure identical to the arguments of this +# command. # # Notes: +# +# Additional arguments depend on the type. +# # 1. For detailed information about this command, please refer to the # 'docs/qdev-device-use.txt' file. # @@ -67,7 +73,8 @@ ## { 'command': 'device_add', 'data': {'driver': 'str', '*bus': 'str', '*id': 'str'}, - 'gen': false } # so we can get the additional arguments + 'gen': false, # so we can get the additional arguments + 'features': ['json-cli'] } ## # @device_del: @@ -84,7 +91,9 @@ # This command merely requests that the guest begin the hot removal # process. Completion of the device removal process is signaled with a # DEVICE_DELETED event. Guest reset will automatically complete removal -# for all devices. +# for all devices. If a guest-side error in the hot removal process is +# detected, the device will not be removed and a DEVICE_UNPLUG_GUEST_ERROR +# event is sent. Some errors cannot be detected. # # Since: 0.14 # @@ -108,9 +117,9 @@ # At this point, it's safe to reuse the specified device ID. Device removal can # be initiated by the guest or by HMP/QMP commands. # -# @device: device name +# @device: the device's ID if it has one # -# @path: device path +# @path: the device's QOM path # # Since: 1.5 # @@ -124,3 +133,26 @@ ## { 'event': 'DEVICE_DELETED', 'data': { '*device': 'str', 'path': 'str' } } + +## +# @DEVICE_UNPLUG_GUEST_ERROR: +# +# Emitted when a device hot unplug fails due to a guest reported error. 
+# +# @device: the device's ID if it has one +# +# @path: the device's QOM path +# +# Since: 6.2 +# +# Example: +# +# <- { "event": "DEVICE_UNPLUG_GUEST_ERROR" +# "data": { "device": "core1", +# "path": "/machine/peripheral/core1" }, +# }, +# "timestamp": { "seconds": 1615570772, "microseconds": 202844 } } +# +## +{ 'event': 'DEVICE_UNPLUG_GUEST_ERROR', + 'data': { '*device': 'str', 'path': 'str' } } diff --git a/qapi/qmp-dispatch.c b/qapi/qmp-dispatch.c index 59600210ce..d378bccac7 100644 --- a/qapi/qmp-dispatch.c +++ b/qapi/qmp-dispatch.c @@ -26,13 +26,11 @@ #include "qemu/coroutine.h" #include "qemu/main-loop.h" -CompatPolicy compat_policy; - Visitor *qobject_input_visitor_new_qmp(QObject *obj) { Visitor *v = qobject_input_visitor_new(obj); - qobject_input_visitor_set_policy(v, compat_policy.deprecated_input); + visit_set_policy(v, &compat_policy); return v; } @@ -40,7 +38,7 @@ Visitor *qobject_output_visitor_new_qmp(QObject **result) { Visitor *v = qobject_output_visitor_new(result); - qobject_output_visitor_set_policy(v, compat_policy.deprecated_output); + visit_set_policy(v, &compat_policy); return v; } @@ -176,19 +174,10 @@ QDict *qmp_dispatch(const QmpCommandList *cmds, QObject *request, "The command %s has not been found", command); goto out; } - if (cmd->options & QCO_DEPRECATED) { - switch (compat_policy.deprecated_input) { - case COMPAT_POLICY_INPUT_ACCEPT: - break; - case COMPAT_POLICY_INPUT_REJECT: - error_set(&err, ERROR_CLASS_COMMAND_NOT_FOUND, - "Deprecated command %s disabled by policy", - command); - goto out; - case COMPAT_POLICY_INPUT_CRASH: - default: - abort(); - } + if (!compat_policy_input_ok(cmd->special_features, &compat_policy, + ERROR_CLASS_COMMAND_NOT_FOUND, + "command", command, &err)) { + goto out; } if (!cmd->enabled) { error_set(&err, ERROR_CLASS_COMMAND_NOT_FOUND, diff --git a/qapi/qmp-registry.c b/qapi/qmp-registry.c index f78c064aae..485bc5e6fc 100644 --- a/qapi/qmp-registry.c +++ b/qapi/qmp-registry.c @@ -16,7 +16,8 @@ #include "qapi/qmp/dispatch.h" void qmp_register_command(QmpCommandList *cmds, const char *name, - QmpCommandFunc *fn, QmpCommandOptions options) + QmpCommandFunc *fn, QmpCommandOptions options, + unsigned special_features) { QmpCommand *cmd = g_malloc0(sizeof(*cmd)); @@ -27,6 +28,7 @@ void qmp_register_command(QmpCommandList *cmds, const char *name, cmd->fn = fn; cmd->enabled = true; cmd->options = options; + cmd->special_features = special_features; QTAILQ_INSERT_TAIL(cmds, cmd, node); } diff --git a/qapi/qobject-input-visitor.c b/qapi/qobject-input-visitor.c index 04b790412e..f0b4c7ca9d 100644 --- a/qapi/qobject-input-visitor.c +++ b/qapi/qobject-input-visitor.c @@ -44,7 +44,6 @@ typedef struct StackObject { struct QObjectInputVisitor { Visitor visitor; - CompatPolicyInput deprecated_policy; /* Root of visit at visitor creation. 
*/ QObject *root; @@ -664,22 +663,13 @@ static void qobject_input_optional(Visitor *v, const char *name, bool *present) *present = true; } -static bool qobject_input_deprecated_accept(Visitor *v, const char *name, - Error **errp) +static bool qobject_input_policy_reject(Visitor *v, const char *name, + unsigned special_features, + Error **errp) { - QObjectInputVisitor *qiv = to_qiv(v); - - switch (qiv->deprecated_policy) { - case COMPAT_POLICY_INPUT_ACCEPT: - return true; - case COMPAT_POLICY_INPUT_REJECT: - error_setg(errp, "Deprecated parameter '%s' disabled by policy", - name); - return false; - case COMPAT_POLICY_INPUT_CRASH: - default: - abort(); - } + return !compat_policy_input_ok(special_features, &v->compat_policy, + ERROR_CLASS_GENERIC_ERROR, + "parameter", name, errp); } static void qobject_input_free(Visitor *v) @@ -716,7 +706,7 @@ static QObjectInputVisitor *qobject_input_visitor_base_new(QObject *obj) v->visitor.end_list = qobject_input_end_list; v->visitor.start_alternate = qobject_input_start_alternate; v->visitor.optional = qobject_input_optional; - v->visitor.deprecated_accept = qobject_input_deprecated_accept; + v->visitor.policy_reject = qobject_input_policy_reject; v->visitor.free = qobject_input_free; v->root = qobject_ref(obj); @@ -739,14 +729,6 @@ Visitor *qobject_input_visitor_new(QObject *obj) return &v->visitor; } -void qobject_input_visitor_set_policy(Visitor *v, - CompatPolicyInput deprecated) -{ - QObjectInputVisitor *qiv = to_qiv(v); - - qiv->deprecated_policy = deprecated; -} - Visitor *qobject_input_visitor_new_keyval(QObject *obj) { QObjectInputVisitor *v = qobject_input_visitor_base_new(obj); diff --git a/qapi/qobject-output-visitor.c b/qapi/qobject-output-visitor.c index e4873308d4..74770edd73 100644 --- a/qapi/qobject-output-visitor.c +++ b/qapi/qobject-output-visitor.c @@ -32,7 +32,6 @@ typedef struct QStackEntry { struct QObjectOutputVisitor { Visitor visitor; - CompatPolicyOutput deprecated_policy; QSLIST_HEAD(, QStackEntry) stack; /* Stack of unfinished containers */ QObject *root; /* Root of the output visit */ @@ -210,11 +209,15 @@ static bool qobject_output_type_null(Visitor *v, const char *name, return true; } -static bool qobject_output_deprecated(Visitor *v, const char *name) +static bool qobject_output_policy_skip(Visitor *v, const char *name, + unsigned special_features) { - QObjectOutputVisitor *qov = to_qov(v); + CompatPolicy *pol = &v->compat_policy; - return qov->deprecated_policy != COMPAT_POLICY_OUTPUT_HIDE; + return ((special_features & 1u << QAPI_DEPRECATED) + && pol->deprecated_output == COMPAT_POLICY_OUTPUT_HIDE) + || ((special_features & 1u << QAPI_UNSTABLE) + && pol->unstable_output == COMPAT_POLICY_OUTPUT_HIDE); } /* Finish building, and return the root object. 
@@ -266,7 +269,7 @@ Visitor *qobject_output_visitor_new(QObject **result) v->visitor.type_number = qobject_output_type_number; v->visitor.type_any = qobject_output_type_any; v->visitor.type_null = qobject_output_type_null; - v->visitor.deprecated = qobject_output_deprecated; + v->visitor.policy_skip = qobject_output_policy_skip; v->visitor.complete = qobject_output_complete; v->visitor.free = qobject_output_free; @@ -275,11 +278,3 @@ Visitor *qobject_output_visitor_new(QObject **result) return &v->visitor; } - -void qobject_output_visitor_set_policy(Visitor *v, - CompatPolicyOutput deprecated) -{ - QObjectOutputVisitor *qov = to_qov(v); - - qov->deprecated_policy = deprecated; -} diff --git a/qapi/qom.json b/qapi/qom.json index a25616bc7a..ccd1167808 100644 --- a/qapi/qom.json +++ b/qapi/qom.json @@ -559,10 +559,8 @@ # for ramblock-id. Disable this for 4.0 # machine types or older to allow # migration with newer QEMU versions. -# This option is considered stable -# despite the x- prefix. (default: -# false generally, but true for machine -# types <= 4.0) +# (default: false generally, +# but true for machine types <= 4.0) # # Note: prealloc=true and reserve=false cannot be set at the same time. With # reserve=true, the behavior depends on the operating system: for example, @@ -647,6 +645,23 @@ '*hugetlbsize': 'size', '*seal': 'bool' } } +## +# @MemoryBackendEpcProperties: +# +# Properties for memory-backend-epc objects. +# +# The @share boolean option is true by default with epc +# +# The @merge boolean option is false by default with epc +# +# The @dump boolean option is false by default with epc +# +# Since: 6.2 +## +{ 'struct': 'MemoryBackendEpcProperties', + 'base': 'MemoryBackendProperties', + 'data': {} } + ## # @PrManagerHelperProperties: # @@ -768,6 +783,9 @@ ## # @ObjectType: # +# Features: +# @unstable: Member @x-remote-object is experimental. 
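#
# (Illustration only, not part of the schema: combined with the new
#  -compat option, unstable-input=reject makes QMP refuse input using
#  this value, e.g. object-add with "qom-type": "x-remote-object",
#  reporting an error along the lines of "Unstable value
#  x-remote-object disabled by policy"; the default
#  unstable-input=accept keeps today's behaviour.)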
+# # Since: 6.0 ## { 'enum': 'ObjectType', @@ -777,7 +795,8 @@ 'authz-pam', 'authz-simple', 'can-bus', - 'can-host-socketcan', + { 'name': 'can-host-socketcan', + 'if': 'CONFIG_LINUX' }, 'colo-compare', 'cryptodev-backend', 'cryptodev-backend-builtin', @@ -791,20 +810,26 @@ 'filter-replay', 'filter-rewriter', 'input-barrier', - 'input-linux', + { 'name': 'input-linux', + 'if': 'CONFIG_LINUX' }, 'iothread', + { 'name': 'memory-backend-epc', + 'if': 'CONFIG_LINUX' }, 'memory-backend-file', { 'name': 'memory-backend-memfd', 'if': 'CONFIG_LINUX' }, 'memory-backend-ram', 'pef-guest', - 'pr-manager-helper', + { 'name': 'pr-manager-helper', + 'if': 'CONFIG_LINUX' }, 'qtest', 'rng-builtin', 'rng-egd', - 'rng-random', + { 'name': 'rng-random', + 'if': 'CONFIG_POSIX' }, 'secret', - 'secret_keyring', + { 'name': 'secret_keyring', + 'if': 'CONFIG_SECRET_KEYRING' }, 'sev-guest', 's390-pv-guest', 'throttle-group', @@ -812,7 +837,7 @@ 'tls-creds-psk', 'tls-creds-x509', 'tls-cipher-suites', - 'x-remote-object' + { 'name': 'x-remote-object', 'features': [ 'unstable' ] } ] } ## @@ -835,7 +860,8 @@ 'authz-listfile': 'AuthZListFileProperties', 'authz-pam': 'AuthZPAMProperties', 'authz-simple': 'AuthZSimpleProperties', - 'can-host-socketcan': 'CanHostSocketcanProperties', + 'can-host-socketcan': { 'type': 'CanHostSocketcanProperties', + 'if': 'CONFIG_LINUX' }, 'colo-compare': 'ColoCompareProperties', 'cryptodev-backend': 'CryptodevBackendProperties', 'cryptodev-backend-builtin': 'CryptodevBackendProperties', @@ -849,19 +875,25 @@ 'filter-replay': 'NetfilterProperties', 'filter-rewriter': 'FilterRewriterProperties', 'input-barrier': 'InputBarrierProperties', - 'input-linux': 'InputLinuxProperties', + 'input-linux': { 'type': 'InputLinuxProperties', + 'if': 'CONFIG_LINUX' }, 'iothread': 'IothreadProperties', + 'memory-backend-epc': { 'type': 'MemoryBackendEpcProperties', + 'if': 'CONFIG_LINUX' }, 'memory-backend-file': 'MemoryBackendFileProperties', 'memory-backend-memfd': { 'type': 'MemoryBackendMemfdProperties', 'if': 'CONFIG_LINUX' }, 'memory-backend-ram': 'MemoryBackendProperties', - 'pr-manager-helper': 'PrManagerHelperProperties', + 'pr-manager-helper': { 'type': 'PrManagerHelperProperties', + 'if': 'CONFIG_LINUX' }, 'qtest': 'QtestProperties', 'rng-builtin': 'RngProperties', 'rng-egd': 'RngEgdProperties', - 'rng-random': 'RngRandomProperties', + 'rng-random': { 'type': 'RngRandomProperties', + 'if': 'CONFIG_POSIX' }, 'secret': 'SecretProperties', - 'secret_keyring': 'SecretKeyringProperties', + 'secret_keyring': { 'type': 'SecretKeyringProperties', + 'if': 'CONFIG_SECRET_KEYRING' }, 'sev-guest': 'SevGuestProperties', 'throttle-group': 'ThrottleGroupProperties', 'tls-creds-anon': 'TlsCredsAnonProperties', diff --git a/qapi/sockets.json b/qapi/sockets.json index ef4b16d6f2..5773d9fcc4 100644 --- a/qapi/sockets.json +++ b/qapi/sockets.json @@ -69,7 +69,7 @@ '*ipv4': 'bool', '*ipv6': 'bool', '*keep-alive': 'bool', - '*mptcp': { 'type': 'bool', 'if': 'IPPROTO_MPTCP' } } } + '*mptcp': { 'type': 'bool', 'if': 'HAVE_IPPROTO_MPTCP' } } } ## # @UnixSocketAddress: diff --git a/qapi/trace-events b/qapi/trace-events index cccafc07e5..ab108c4f0e 100644 --- a/qapi/trace-events +++ b/qapi/trace-events @@ -17,8 +17,8 @@ visit_start_alternate(void *v, const char *name, void *obj, size_t size) "v=%p n visit_end_alternate(void *v, void *obj) "v=%p obj=%p" visit_optional(void *v, const char *name, bool *present) "v=%p name=%s present=%p" -visit_deprecated_accept(void *v, const char *name) "v=%p name=%s" -visit_deprecated(void 
*v, const char *name) "v=%p name=%s" +visit_policy_reject(void *v, const char *name) "v=%p name=%s" +visit_policy_skip(void *v, const char *name) "v=%p name=%s" visit_type_enum(void *v, const char *name, int *obj) "v=%p name=%s obj=%p" visit_type_int(void *v, const char *name, int64_t *obj) "v=%p name=%s obj=%p" diff --git a/qapi/transaction.json b/qapi/transaction.json index d175b5f863..381a2df782 100644 --- a/qapi/transaction.json +++ b/qapi/transaction.json @@ -54,6 +54,10 @@ # @blockdev-snapshot-sync: since 1.1 # @drive-backup: Since 1.6 # +# Features: +# @deprecated: Member @drive-backup is deprecated. Use member +# @blockdev-backup instead. +# # Since: 1.1 ## { 'enum': 'TransactionActionKind', @@ -62,7 +66,7 @@ 'block-dirty-bitmap-disable', 'block-dirty-bitmap-merge', 'blockdev-backup', 'blockdev-snapshot', 'blockdev-snapshot-internal-sync', 'blockdev-snapshot-sync', - 'drive-backup' ] } + { 'name': 'drive-backup', 'features': [ 'deprecated' ] } ] } ## # @AbortWrapper: diff --git a/qemu-img-cmds.hx b/qemu-img-cmds.hx index 4c4d94ab22..72bcdcfbfa 100644 --- a/qemu-img-cmds.hx +++ b/qemu-img-cmds.hx @@ -48,7 +48,7 @@ ERST DEF("convert", img_convert, "convert [--object objectdef] [--image-opts] [--target-image-opts] [--target-is-zero] [--bitmaps] [-U] [-C] [-c] [-p] [-q] [-n] [-f fmt] [-t cache] [-T src_cache] [-O output_fmt] [-B backing_file [-F backing_fmt]] [-o options] [-l snapshot_param] [-S sparse_size] [-r rate_limit] [-m num_coroutines] [-W] [--salvage] filename [filename2 [...]] output_filename") SRST -.. option:: convert [--object OBJECTDEF] [--image-opts] [--target-image-opts] [--target-is-zero] [--bitmaps] [-U] [-C] [-c] [-p] [-q] [-n] [-f FMT] [-t CACHE] [-T SRC_CACHE] [-O OUTPUT_FMT] [-B BACKING_FILE] [-o OPTIONS] [-l SNAPSHOT_PARAM] [-S SPARSE_SIZE] [-r RATE_LIMIT] [-m NUM_COROUTINES] [-W] [--salvage] FILENAME [FILENAME2 [...]] OUTPUT_FILENAME +.. option:: convert [--object OBJECTDEF] [--image-opts] [--target-image-opts] [--target-is-zero] [--bitmaps] [-U] [-C] [-c] [-p] [-q] [-n] [-f FMT] [-t CACHE] [-T SRC_CACHE] [-O OUTPUT_FMT] [-B BACKING_FILE [-F BACKING_FMT]] [-o OPTIONS] [-l SNAPSHOT_PARAM] [-S SPARSE_SIZE] [-r RATE_LIMIT] [-m NUM_COROUTINES] [-W] [--salvage] FILENAME [FILENAME2 [...]] OUTPUT_FILENAME ERST DEF("create", img_create, diff --git a/qemu-nbd.c b/qemu-nbd.c index 65ebec598f..9d895ba24b 100644 --- a/qemu-nbd.c +++ b/qemu-nbd.c @@ -135,7 +135,9 @@ static void usage(const char *name) " 'snapshot.id=[ID],snapshot.name=[NAME]', or\n" " '[ID_OR_NAME]'\n" " -n, --nocache disable host cache\n" -" --cache=MODE set cache mode (none, writeback, ...)\n" +" --cache=MODE set cache mode used to access the disk image, the\n" +" valid options are: 'none', 'writeback' (default),\n" +" 'writethrough', 'directsync' and 'unsafe'\n" " --aio=MODE set AIO mode (native, io_uring or threads)\n" " --discard=MODE set discard mode (ignore, unmap)\n" " --detect-zeroes=MODE set detect-zeroes mode (off, on, unmap)\n" @@ -552,7 +554,7 @@ int main(int argc, char **argv) bool alloc_depth = false; const char *tlscredsid = NULL; bool imageOpts = false; - bool writethrough = true; + bool writethrough = false; /* Client will flush as needed. 
*/ bool fork_process = false; bool list = false; int old_stderr = -1; diff --git a/qemu-options.hx b/qemu-options.hx index 8f603cc7e6..7749f59300 100644 --- a/qemu-options.hx +++ b/qemu-options.hx @@ -126,8 +126,14 @@ SRST -m 512M ERST -HXCOMM Deprecated by -machine -DEF("M", HAS_ARG, QEMU_OPTION_M, "", QEMU_ARCH_ALL) +DEF("M", HAS_ARG, QEMU_OPTION_M, + " sgx-epc.0.memdev=memid\n", + QEMU_ARCH_ALL) + +SRST +``sgx-epc.0.memdev=@var{memid}`` + Define an SGX EPC section. +ERST DEF("cpu", HAS_ARG, QEMU_OPTION_cpu, "-cpu cpu select CPU ('-cpu help' for list)\n", QEMU_ARCH_ALL) @@ -200,7 +206,7 @@ SRST ERST DEF("smp", HAS_ARG, QEMU_OPTION_smp, - "-smp [[cpus=]n][,maxcpus=cpus][,sockets=sockets][,dies=dies][,cores=cores][,threads=threads]\n" + "-smp [[cpus=]n][,maxcpus=maxcpus][,sockets=sockets][,dies=dies][,cores=cores][,threads=threads]\n" " set the number of CPUs to 'n' [default=1]\n" " maxcpus= maximum number of total CPUs, including\n" " offline CPUs for hotplug, etc\n" @@ -214,9 +220,14 @@ SRST Simulate a SMP system with '\ ``n``\ ' CPUs initially present on the machine type board. On boards supporting CPU hotplug, the optional '\ ``maxcpus``\ ' parameter can be set to enable further CPUs to be - added at runtime. If omitted the maximum number of CPUs will be - set to match the initial CPU count. Both parameters are subject to - an upper limit that is determined by the specific machine type chosen. + added at runtime. When both parameters are omitted, the maximum number + of CPUs will be calculated from the provided topology members and the + initial CPU count will match the maximum number. When only one of them + is given then the omitted one will be set to its counterpart's value. + Both parameters may be specified, but the maximum number of CPUs must + be equal to or greater than the initial CPU count. Both parameters are + subject to an upper limit that is determined by the specific machine + type chosen. To control reporting of CPU topology information, the number of sockets, dies per socket, cores per die, and threads per core can be specified. @@ -227,11 +238,14 @@ SRST of computing the CPU maximum count. Either the initial CPU count, or at least one of the topology parameters - must be specified. Values for any omitted parameters will be computed - from those which are given. Historically preference was given to the - coarsest topology parameters when computing missing values (ie sockets - preferred over cores, which were preferred over threads), however, this - behaviour is considered liable to change. + must be specified. The specified parameters must be greater than zero, + explicit configuration like "cpus=0" is not allowed. Values for any + omitted parameters will be computed from those which are given. + Historically preference was given to the coarsest topology parameters + when computing missing values (ie sockets preferred over cores, which + were preferred over threads), however, this behaviour is considered + liable to change. Prior to 6.2 the preference was sockets over cores + over threads. Since 6.2 the preference is cores over sockets over threads. 
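    A rough sketch of the simplest case above, with every topology member
    given explicitly and both CPU counts omitted (illustrative Python, not
    QEMU's implementation)::

        def smp_counts(sockets, dies, cores, threads):
            # The topology product gives the maximum CPU count, including
            # CPUs that may be hotplugged later.
            maxcpus = sockets * dies * cores * threads
            # An omitted initial count matches the maximum.
            cpus = maxcpus
            return cpus, maxcpus

        # -smp sockets=2,dies=1,cores=4,threads=2  ->  cpus=16, maxcpus=16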
ERST DEF("numa", HAS_ARG, QEMU_OPTION_numa, @@ -387,7 +401,7 @@ SRST -m 2G \ -object memory-backend-ram,size=1G,id=m0 \ -object memory-backend-ram,size=1G,id=m1 \ - -smp 2 \ + -smp 2,sockets=2,maxcpus=2 \ -numa node,nodeid=0,memdev=m0 \ -numa node,nodeid=1,memdev=m1,initiator=0 \ -numa cpu,node-id=0,socket-id=0 \ @@ -1881,8 +1895,8 @@ SRST Valid parameters are: ``grab-mod=`` : Used to select the modifier keys for toggling - the mouse grabbing in conjunction with the "g" key. `` can be - either `lshift-lctrl-lalt` or `rctrl`. + the mouse grabbing in conjunction with the "g" key. ```` can be + either ``lshift-lctrl-lalt`` or ``rctrl``. ``alt_grab=on|off`` : Use Control+Alt+Shift-g to toggle mouse grabbing. This parameter is deprecated - use ``grab-mod`` instead. @@ -3188,7 +3202,7 @@ DEFHEADING(Character device options:) DEF("chardev", HAS_ARG, QEMU_OPTION_chardev, "-chardev help\n" "-chardev null,id=id[,mux=on|off][,logfile=PATH][,logappend=on|off]\n" - "-chardev socket,id=id[,host=host],port=port[,to=to][,ipv4=on|off][,ipv6=on|off][,nodelay=on|off][,reconnect=seconds]\n" + "-chardev socket,id=id[,host=host],port=port[,to=to][,ipv4=on|off][,ipv6=on|off][,nodelay=on|off]\n" " [,server=on|off][,wait=on|off][,telnet=on|off][,websocket=on|off][,reconnect=seconds][,mux=on|off]\n" " [,logfile=PATH][,logappend=on|off][,tls-creds=ID][,tls-authz=ID] (tcp)\n" "-chardev socket,id=id,path=path[,server=on|off][,wait=on|off][,telnet=on|off][,websocket=on|off][,reconnect=seconds]\n" @@ -3627,7 +3641,9 @@ DEFHEADING(Debug/Expert options:) DEF("compat", HAS_ARG, QEMU_OPTION_compat, "-compat [deprecated-input=accept|reject|crash][,deprecated-output=accept|hide]\n" - " Policy for handling deprecated management interfaces\n", + " Policy for handling deprecated management interfaces\n" + "-compat [unstable-input=accept|reject|crash][,unstable-output=accept|hide]\n" + " Policy for handling unstable management interfaces\n", QEMU_ARCH_ALL) SRST ``-compat [deprecated-input=@var{input-policy}][,deprecated-output=@var{output-policy}]`` @@ -3645,6 +3661,22 @@ SRST Suppress deprecated command results and events Limitation: covers only syntactic aspects of QMP. + +``-compat [unstable-input=@var{input-policy}][,unstable-output=@var{output-policy}]`` + Set policy for handling unstable management interfaces (experimental): + + ``unstable-input=accept`` (default) + Accept unstable commands and arguments + ``unstable-input=reject`` + Reject unstable commands and arguments + ``unstable-input=crash`` + Crash on unstable commands and arguments + ``unstable-output=accept`` (default) + Emit unstable command results and events + ``unstable-output=hide`` + Suppress unstable command results and events + + Limitation: covers only syntactic aspects of QMP. ERST DEF("fw_cfg", HAS_ARG, QEMU_OPTION_fwcfg, diff --git a/qga/qapi-schema.json b/qga/qapi-schema.json index c60f5e669d..94e4aacdcc 100644 --- a/qga/qapi-schema.json +++ b/qga/qapi-schema.json @@ -1140,6 +1140,7 @@ ## # @GuestExec: +# # @pid: pid of child process in guest OS # # Since: 2.5 @@ -1171,6 +1172,7 @@ ## # @GuestHostName: +# # @host-name: Fully qualified domain name of the guest OS # # Since: 2.10 @@ -1197,6 +1199,7 @@ ## # @GuestUser: +# # @user: Username # @domain: Logon domain (windows only) # @login-time: Time of login of this user on the computer. 
If multiple diff --git a/qom/object.c b/qom/object.c index e86cb05b84..4f0677cca9 100644 --- a/qom/object.c +++ b/qom/object.c @@ -1389,7 +1389,7 @@ bool object_property_get(Object *obj, const char *name, Visitor *v, bool object_property_set(Object *obj, const char *name, Visitor *v, Error **errp) { - Error *err = NULL; + ERRP_GUARD(); ObjectProperty *prop = object_property_find_err(obj, name, errp); if (prop == NULL) { @@ -1400,9 +1400,8 @@ bool object_property_set(Object *obj, const char *name, Visitor *v, error_setg(errp, QERR_PERMISSION_DENIED); return false; } - prop->set(obj, v, name, prop->opaque, &err); - error_propagate(errp, err); - return !err; + prop->set(obj, v, name, prop->opaque, errp); + return !*errp; } bool object_property_set_str(Object *obj, const char *name, @@ -2145,6 +2144,17 @@ Object *object_resolve_path(const char *path, bool *ambiguous) return object_resolve_path_type(path, TYPE_OBJECT, ambiguous); } +Object *object_resolve_path_at(Object *parent, const char *path) +{ + g_auto(GStrv) parts = g_strsplit(path, "/", 0); + + if (*path == '/') { + return object_resolve_abs_path(object_get_root(), parts + 1, + TYPE_OBJECT); + } + return object_resolve_abs_path(parent, parts, TYPE_OBJECT); +} + typedef struct StringProperty { char *(*get)(Object *, Error **); diff --git a/qom/object_interfaces.c b/qom/object_interfaces.c index ad9b56b59a..3b61c195c5 100644 --- a/qom/object_interfaces.c +++ b/qom/object_interfaces.c @@ -46,25 +46,18 @@ static void object_set_properties_from_qdict(Object *obj, const QDict *qdict, Visitor *v, Error **errp) { const QDictEntry *e; - Error *local_err = NULL; - if (!visit_start_struct(v, NULL, NULL, 0, &local_err)) { - goto out; + if (!visit_start_struct(v, NULL, NULL, 0, errp)) { + return; } for (e = qdict_first(qdict); e; e = qdict_next(qdict, e)) { - if (!object_property_set(obj, e->key, v, &local_err)) { - break; + if (!object_property_set(obj, e->key, v, errp)) { + goto out; } } - if (!local_err) { - visit_check_struct(v, &local_err); - } - visit_end_struct(v, NULL); - + visit_check_struct(v, errp); out: - if (local_err) { - error_propagate(errp, local_err); - } + visit_end_struct(v, NULL); } void object_set_properties_from_keyval(Object *obj, const QDict *qdict, diff --git a/roms/Makefile b/roms/Makefile index eeb5970348..b967b53bb7 100644 --- a/roms/Makefile +++ b/roms/Makefile @@ -143,7 +143,8 @@ build-efi-roms: build-pxe-roms # efirom # edk2-basetools: - cd edk2/BaseTools && git submodule update --init --force + cd edk2/BaseTools && git submodule update --init --force \ + Source/C/BrotliCompress/brotli $(MAKE) -C edk2/BaseTools \ PYTHON_COMMAND=$${EDK2_PYTHON_COMMAND:-python3} \ EXTRA_OPTFLAGS='$(EDK2_BASETOOLS_OPTFLAGS)' \ diff --git a/roms/Makefile.edk2 b/roms/Makefile.edk2 index a8ed325575..fdae0b511f 100644 --- a/roms/Makefile.edk2 +++ b/roms/Makefile.edk2 @@ -51,7 +51,12 @@ all: $(foreach flashdev,$(flashdevs),../pc-bios/edk2-$(flashdev).fd.bz2) \ # make-release/tarball scripts. 
submodules: if test -d edk2/.git; then \ - cd edk2 && git submodule update --init --force; \ + cd edk2 && git submodule update --init --force -- \ + ArmPkg/Library/ArmSoftFloatLib/berkeley-softfloat-3 \ + BaseTools/Source/C/BrotliCompress/brotli \ + CryptoPkg/Library/OpensslLib/openssl \ + MdeModulePkg/Library/BrotliCustomDecompressLib/brotli \ + ; \ fi # See notes on the ".NOTPARALLEL" target and the "+" indicator in diff --git a/roms/openbios b/roms/openbios index d657b65318..b9062deaae 160000 --- a/roms/openbios +++ b/roms/openbios @@ -1 +1 @@ -Subproject commit d657b653186c0fd6e062cab133497415c2a5a5b8 +Subproject commit b9062deaaea7269369eaa46260d75edcaf276afa diff --git a/roms/seabios b/roms/seabios index 155821a199..64f37cc530 160000 --- a/roms/seabios +++ b/roms/seabios @@ -1 +1 @@ -Subproject commit 155821a1990b6de78dde5f98fa5ab90e802021e0 +Subproject commit 64f37cc530f144e53c190c9e8209a51b58fd5c43 diff --git a/roms/seabios-hppa b/roms/seabios-hppa index 73b740f771..b12acac4be 160000 --- a/roms/seabios-hppa +++ b/roms/seabios-hppa @@ -1 +1 @@ -Subproject commit 73b740f77190643b2ada5ee97a9a108c6ef2a37b +Subproject commit b12acac4be27b6d5d9fbe48c4be1286dcc245fbb diff --git a/scripts/analyze-migration.py b/scripts/analyze-migration.py index d7177b212c..b82a1b0c58 100755 --- a/scripts/analyze-migration.py +++ b/scripts/analyze-migration.py @@ -588,7 +588,7 @@ if args.extract: dump.read(desc_only = True) print("desc.json") - f = open("desc.json", "wb") + f = open("desc.json", "w") f.truncate() f.write(jsonenc.encode(dump.vmsd_desc)) f.close() @@ -596,7 +596,7 @@ if args.extract: dump.read(write_memory = True) dict = dump.getDict() print("state.json") - f = open("state.json", "wb") + f = open("state.json", "w") f.truncate() f.write(jsonenc.encode(dict)) f.close() @@ -610,4 +610,4 @@ elif args.dump == "desc": dump.read(desc_only = True) print(jsonenc.encode(dump.vmsd_desc)) else: - raise Exception("Please specify either -x, -d state or -d dump") + raise Exception("Please specify either -x, -d state or -d desc") diff --git a/scripts/block-coroutine-wrapper.py b/scripts/block-coroutine-wrapper.py index 85dbeb9ecf..08be813407 100644 --- a/scripts/block-coroutine-wrapper.py +++ b/scripts/block-coroutine-wrapper.py @@ -100,12 +100,20 @@ def snake_to_camel(func_name: str) -> str: def gen_wrapper(func: FuncDecl) -> str: assert not '_co_' in func.name assert func.return_type == 'int' - assert func.args[0].type in ['BlockDriverState *', 'BdrvChild *'] + assert func.args[0].type in ['BlockDriverState *', 'BdrvChild *', + 'BlockBackend *'] subsystem, subname = func.name.split('_', 1) name = f'{subsystem}_co_{subname}' - bs = 'bs' if func.args[0].type == 'BlockDriverState *' else 'child->bs' + + t = func.args[0].type + if t == 'BlockDriverState *': + bs = 'bs' + elif t == 'BdrvChild *': + bs = 'child->bs' + else: + bs = 'blk_bs(blk)' struct_name = snake_to_camel(name) return f"""\ diff --git a/scripts/make-release b/scripts/make-release index a2a8cda33c..05b14ecc95 100755 --- a/scripts/make-release +++ b/scripts/make-release @@ -27,7 +27,12 @@ git submodule update --init # don't necessarily have much control over how a submodule handles its # submodule dependencies, so we continue to handle these on a case-by-case # basis for now. 
-(cd roms/edk2 && git submodule update --init) +(cd roms/edk2 && \ + git submodule update --init -- \ + ArmPkg/Library/ArmSoftFloatLib/berkeley-softfloat-3 \ + BaseTools/Source/C/BrotliCompress/brotli \ + CryptoPkg/Library/OpensslLib/openssl \ + MdeModulePkg/Library/BrotliCustomDecompressLib/brotli) popd tar --exclude=.git -cjf ${destination}.tar.bz2 ${destination} rm -rf ${destination} diff --git a/scripts/meson-buildoptions.py b/scripts/meson-buildoptions.py new file mode 100755 index 0000000000..96969d89ee --- /dev/null +++ b/scripts/meson-buildoptions.py @@ -0,0 +1,156 @@ +#! /usr/bin/env python3 + +# Generate configure command line options handling code, based on Meson's +# user build options introspection data +# +# Copyright (C) 2021 Red Hat, Inc. +# +# Author: Paolo Bonzini +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2, or (at your option) +# any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +import json +import textwrap +import shlex +import sys + +SKIP_OPTIONS = { + "audio_drv_list", + "default_devices", + "docdir", + "fuzzing_engine", + "qemu_firmwarepath", + "qemu_suffix", + "sphinx_build", + "trace_file", +} + +LINE_WIDTH = 76 + + +# Convert the default value of an option to the string used in +# the help message +def value_to_help(value): + if isinstance(value, list): + return ",".join(value) + if isinstance(value, bool): + return "enabled" if value else "disabled" + return str(value) + + +def wrap(left, text, indent): + spaces = " " * indent + if len(left) >= indent: + yield left + left = spaces + else: + left = (left + spaces)[0:indent] + yield from textwrap.wrap( + text, width=LINE_WIDTH, initial_indent=left, subsequent_indent=spaces + ) + + +def sh_print(line=""): + print(' printf "%s\\n"', shlex.quote(line)) + + +def help_line(left, opt, indent, long): + right = f'{opt["description"]}' + if long: + value = value_to_help(opt["value"]) + if value != "auto": + right += f" [{value}]" + if "choices" in opt and long: + choices = "/".join(sorted(opt["choices"])) + right += f" (choices: {choices})" + for x in wrap(" " + left, right, indent): + sh_print(x) + + +# Return whether the option (a dictionary) can be used with +# arguments. Booleans can never be used with arguments; +# combos allow an argument only if they accept other values +# than "auto", "enabled", and "disabled". 
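# Illustration only (not part of the generated script); the combo case
# is modelled on the capstone option visible in the generated file below:
#   allow_arg({'type': 'boolean', 'value': True})                  -> False
#   allow_arg({'type': 'combo',
#              'choices': ['auto', 'disabled', 'enabled']})        -> False
#   allow_arg({'type': 'combo', 'choices': ['auto', 'disabled',
#              'enabled', 'internal', 'system']})                  -> True
#   allow_arg({'type': 'string', 'value': ''})                     -> True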
+def allow_arg(opt): + if opt["type"] == "boolean": + return False + if opt["type"] != "combo": + return True + return not (set(opt["choices"]) <= {"auto", "disabled", "enabled"}) + + +def load_options(json): + json = [ + x + for x in json + if x["section"] == "user" + and ":" not in x["name"] + and x["name"] not in SKIP_OPTIONS + ] + return sorted(json, key=lambda x: x["name"]) + + +def print_help(options): + print("meson_options_help() {") + for opt in options: + key = opt["name"].replace("_", "-") + # The first section includes options that have an arguments, + # and booleans (i.e., only one of enable/disable makes sense) + if opt["type"] == "boolean": + left = f"--disable-{key}" if opt["value"] else f"--enable-{key}" + help_line(left, opt, 27, False) + elif allow_arg(opt): + if opt["type"] == "combo" and "enabled" in opt["choices"]: + left = f"--enable-{key}[=CHOICE]" + else: + left = f"--enable-{key}=CHOICE" + help_line(left, opt, 27, True) + + sh_print() + sh_print("Optional features, enabled with --enable-FEATURE and") + sh_print("disabled with --disable-FEATURE, default is enabled if available") + sh_print("(unless built with --without-default-features):") + sh_print() + for opt in options: + key = opt["name"].replace("_", "-") + if opt["type"] != "boolean" and not allow_arg(opt): + help_line(key, opt, 18, False) + print("}") + + +def print_parse(options): + print("_meson_option_parse() {") + print(" case $1 in") + for opt in options: + key = opt["name"].replace("_", "-") + name = opt["name"] + if opt["type"] == "boolean": + print(f' --enable-{key}) printf "%s" -D{name}=true ;;') + print(f' --disable-{key}) printf "%s" -D{name}=false ;;') + else: + if opt["type"] == "combo" and "enabled" in opt["choices"]: + print(f' --enable-{key}) printf "%s" -D{name}=enabled ;;') + if opt["type"] == "combo" and "disabled" in opt["choices"]: + print(f' --disable-{key}) printf "%s" -D{name}=disabled ;;') + if allow_arg(opt): + print(f' --enable-{key}=*) quote_sh "-D{name}=$2" ;;') + print(" *) return 1 ;;") + print(" esac") + print("}") + + +options = load_options(json.load(sys.stdin)) +print("# This file is generated by meson-buildoptions.py, do not edit!") +print_help(options) +print_parse(options) diff --git a/scripts/meson-buildoptions.sh b/scripts/meson-buildoptions.sh new file mode 100644 index 0000000000..45e1f2e20d --- /dev/null +++ b/scripts/meson-buildoptions.sh @@ -0,0 +1,272 @@ +# This file is generated by meson-buildoptions.py, do not edit! 
+meson_options_help() { + printf "%s\n" ' --enable-capstone[=CHOICE]' + printf "%s\n" ' Whether and how to find the capstone library' + printf "%s\n" ' (choices: auto/disabled/enabled/internal/system)' + printf "%s\n" ' --enable-cfi Control-Flow Integrity (CFI)' + printf "%s\n" ' --enable-cfi-debug Verbose errors in case of CFI violation' + printf "%s\n" ' --enable-fdt[=CHOICE] Whether and how to find the libfdt library' + printf "%s\n" ' (choices: auto/disabled/enabled/internal/system)' + printf "%s\n" ' --enable-fuzzing build fuzzing targets' + printf "%s\n" ' --disable-install-blobs install provided firmware blobs' + printf "%s\n" ' --enable-malloc=CHOICE choose memory allocator to use [system] (choices:' + printf "%s\n" ' jemalloc/system/tcmalloc)' + printf "%s\n" ' --enable-slirp[=CHOICE] Whether and how to find the slirp library' + printf "%s\n" ' (choices: auto/disabled/enabled/internal/system)' + printf "%s\n" ' --enable-tcg-interpreter TCG with bytecode interpreter (slow)' + printf "%s\n" ' --enable-trace-backends=CHOICE' + printf "%s\n" ' Set available tracing backends [log] (choices:' + printf "%s\n" ' dtrace/ftrace/log/nop/simple/syslog/ust)' + printf "%s\n" '' + printf "%s\n" 'Optional features, enabled with --enable-FEATURE and' + printf "%s\n" 'disabled with --disable-FEATURE, default is enabled if available' + printf "%s\n" '(unless built with --without-default-features):' + printf "%s\n" '' + printf "%s\n" ' alsa ALSA sound support' + printf "%s\n" ' attr attr/xattr support' + printf "%s\n" ' auth-pam PAM access control' + printf "%s\n" ' bpf eBPF support' + printf "%s\n" ' brlapi brlapi character device driver' + printf "%s\n" ' bzip2 bzip2 support for DMG images' + printf "%s\n" ' cap-ng cap_ng support' + printf "%s\n" ' cocoa Cocoa user interface (macOS only)' + printf "%s\n" ' coreaudio CoreAudio sound support' + printf "%s\n" ' curl CURL block device driver' + printf "%s\n" ' curses curses UI' + printf "%s\n" ' docs Documentations build support' + printf "%s\n" ' dsound DirectSound sound support' + printf "%s\n" ' fuse FUSE block device export' + printf "%s\n" ' fuse-lseek SEEK_HOLE/SEEK_DATA support for FUSE exports' + printf "%s\n" ' gcrypt libgcrypt cryptography support' + printf "%s\n" ' gettext Localization of the GTK+ user interface' + printf "%s\n" ' glusterfs Glusterfs block device driver' + printf "%s\n" ' gnutls GNUTLS cryptography support' + printf "%s\n" ' gtk GTK+ user interface' + printf "%s\n" ' guest-agent-msi Build MSI package for the QEMU Guest Agent' + printf "%s\n" ' hax HAX acceleration support' + printf "%s\n" ' hvf HVF acceleration support' + printf "%s\n" ' iconv Font glyph conversion support' + printf "%s\n" ' jack JACK sound support' + printf "%s\n" ' kvm KVM acceleration support' + printf "%s\n" ' l2tpv3 l2tpv3 network backend support' + printf "%s\n" ' libdaxctl libdaxctl support' + printf "%s\n" ' libiscsi libiscsi userspace initiator' + printf "%s\n" ' libnfs libnfs block device driver' + printf "%s\n" ' libpmem libpmem support' + printf "%s\n" ' libudev Use libudev to enumerate host devices' + printf "%s\n" ' libusb libusb support for USB passthrough' + printf "%s\n" ' libxml2 libxml2 support for Parallels image format' + printf "%s\n" ' linux-aio Linux AIO support' + printf "%s\n" ' linux-io-uring Linux io_uring support' + printf "%s\n" ' lzfse lzfse support for DMG images' + printf "%s\n" ' lzo lzo compression support' + printf "%s\n" ' malloc-trim enable libc malloc_trim() for memory optimization' + printf "%s\n" ' mpath Multipath 
persistent reservation passthrough' + printf "%s\n" ' multiprocess Out of process device emulation support' + printf "%s\n" ' netmap netmap network backend support' + printf "%s\n" ' nettle nettle cryptography support' + printf "%s\n" ' nvmm NVMM acceleration support' + printf "%s\n" ' oss OSS sound support' + printf "%s\n" ' pa PulseAudio sound support' + printf "%s\n" ' rbd Ceph block device driver' + printf "%s\n" ' sdl SDL user interface' + printf "%s\n" ' sdl-image SDL Image support for icons' + printf "%s\n" ' seccomp seccomp support' + printf "%s\n" ' smartcard CA smartcard emulation support' + printf "%s\n" ' snappy snappy compression support' + printf "%s\n" ' sparse sparse checker' + printf "%s\n" ' spice Spice server support' + printf "%s\n" ' spice-protocol Spice protocol support' + printf "%s\n" ' tcg TCG support' + printf "%s\n" ' u2f U2F emulation support' + printf "%s\n" ' usb-redir libusbredir support' + printf "%s\n" ' vde vde network backend support' + printf "%s\n" ' vhost-user-blk-server' + printf "%s\n" ' build vhost-user-blk server' + printf "%s\n" ' virglrenderer virgl rendering support' + printf "%s\n" ' virtfs virtio-9p support' + printf "%s\n" ' virtiofsd build virtiofs daemon (virtiofsd)' + printf "%s\n" ' vnc VNC server' + printf "%s\n" ' vnc-jpeg JPEG lossy compression for VNC server' + printf "%s\n" ' vnc-png PNG compression for VNC server' + printf "%s\n" ' vnc-sasl SASL authentication for VNC server' + printf "%s\n" ' vte vte support for the gtk UI' + printf "%s\n" ' whpx WHPX acceleration support' + printf "%s\n" ' xen Xen backend support' + printf "%s\n" ' xen-pci-passthrough' + printf "%s\n" ' Xen PCI passthrough support' + printf "%s\n" ' xkbcommon xkbcommon support' + printf "%s\n" ' zstd zstd compression support' +} +_meson_option_parse() { + case $1 in + --enable-alsa) printf "%s" -Dalsa=enabled ;; + --disable-alsa) printf "%s" -Dalsa=disabled ;; + --enable-attr) printf "%s" -Dattr=enabled ;; + --disable-attr) printf "%s" -Dattr=disabled ;; + --enable-auth-pam) printf "%s" -Dauth_pam=enabled ;; + --disable-auth-pam) printf "%s" -Dauth_pam=disabled ;; + --enable-bpf) printf "%s" -Dbpf=enabled ;; + --disable-bpf) printf "%s" -Dbpf=disabled ;; + --enable-brlapi) printf "%s" -Dbrlapi=enabled ;; + --disable-brlapi) printf "%s" -Dbrlapi=disabled ;; + --enable-bzip2) printf "%s" -Dbzip2=enabled ;; + --disable-bzip2) printf "%s" -Dbzip2=disabled ;; + --enable-cap-ng) printf "%s" -Dcap_ng=enabled ;; + --disable-cap-ng) printf "%s" -Dcap_ng=disabled ;; + --enable-capstone) printf "%s" -Dcapstone=enabled ;; + --disable-capstone) printf "%s" -Dcapstone=disabled ;; + --enable-capstone=*) quote_sh "-Dcapstone=$2" ;; + --enable-cfi) printf "%s" -Dcfi=true ;; + --disable-cfi) printf "%s" -Dcfi=false ;; + --enable-cfi-debug) printf "%s" -Dcfi_debug=true ;; + --disable-cfi-debug) printf "%s" -Dcfi_debug=false ;; + --enable-cocoa) printf "%s" -Dcocoa=enabled ;; + --disable-cocoa) printf "%s" -Dcocoa=disabled ;; + --enable-coreaudio) printf "%s" -Dcoreaudio=enabled ;; + --disable-coreaudio) printf "%s" -Dcoreaudio=disabled ;; + --enable-curl) printf "%s" -Dcurl=enabled ;; + --disable-curl) printf "%s" -Dcurl=disabled ;; + --enable-curses) printf "%s" -Dcurses=enabled ;; + --disable-curses) printf "%s" -Dcurses=disabled ;; + --enable-docs) printf "%s" -Ddocs=enabled ;; + --disable-docs) printf "%s" -Ddocs=disabled ;; + --enable-dsound) printf "%s" -Ddsound=enabled ;; + --disable-dsound) printf "%s" -Ddsound=disabled ;; + --enable-fdt) printf "%s" -Dfdt=enabled ;; + 
--disable-fdt) printf "%s" -Dfdt=disabled ;; + --enable-fdt=*) quote_sh "-Dfdt=$2" ;; + --enable-fuse) printf "%s" -Dfuse=enabled ;; + --disable-fuse) printf "%s" -Dfuse=disabled ;; + --enable-fuse-lseek) printf "%s" -Dfuse_lseek=enabled ;; + --disable-fuse-lseek) printf "%s" -Dfuse_lseek=disabled ;; + --enable-fuzzing) printf "%s" -Dfuzzing=true ;; + --disable-fuzzing) printf "%s" -Dfuzzing=false ;; + --enable-gcrypt) printf "%s" -Dgcrypt=enabled ;; + --disable-gcrypt) printf "%s" -Dgcrypt=disabled ;; + --enable-gettext) printf "%s" -Dgettext=enabled ;; + --disable-gettext) printf "%s" -Dgettext=disabled ;; + --enable-glusterfs) printf "%s" -Dglusterfs=enabled ;; + --disable-glusterfs) printf "%s" -Dglusterfs=disabled ;; + --enable-gnutls) printf "%s" -Dgnutls=enabled ;; + --disable-gnutls) printf "%s" -Dgnutls=disabled ;; + --enable-gtk) printf "%s" -Dgtk=enabled ;; + --disable-gtk) printf "%s" -Dgtk=disabled ;; + --enable-guest-agent-msi) printf "%s" -Dguest_agent_msi=enabled ;; + --disable-guest-agent-msi) printf "%s" -Dguest_agent_msi=disabled ;; + --enable-hax) printf "%s" -Dhax=enabled ;; + --disable-hax) printf "%s" -Dhax=disabled ;; + --enable-hvf) printf "%s" -Dhvf=enabled ;; + --disable-hvf) printf "%s" -Dhvf=disabled ;; + --enable-iconv) printf "%s" -Diconv=enabled ;; + --disable-iconv) printf "%s" -Diconv=disabled ;; + --enable-install-blobs) printf "%s" -Dinstall_blobs=true ;; + --disable-install-blobs) printf "%s" -Dinstall_blobs=false ;; + --enable-jack) printf "%s" -Djack=enabled ;; + --disable-jack) printf "%s" -Djack=disabled ;; + --enable-kvm) printf "%s" -Dkvm=enabled ;; + --disable-kvm) printf "%s" -Dkvm=disabled ;; + --enable-l2tpv3) printf "%s" -Dl2tpv3=enabled ;; + --disable-l2tpv3) printf "%s" -Dl2tpv3=disabled ;; + --enable-libdaxctl) printf "%s" -Dlibdaxctl=enabled ;; + --disable-libdaxctl) printf "%s" -Dlibdaxctl=disabled ;; + --enable-libiscsi) printf "%s" -Dlibiscsi=enabled ;; + --disable-libiscsi) printf "%s" -Dlibiscsi=disabled ;; + --enable-libnfs) printf "%s" -Dlibnfs=enabled ;; + --disable-libnfs) printf "%s" -Dlibnfs=disabled ;; + --enable-libpmem) printf "%s" -Dlibpmem=enabled ;; + --disable-libpmem) printf "%s" -Dlibpmem=disabled ;; + --enable-libudev) printf "%s" -Dlibudev=enabled ;; + --disable-libudev) printf "%s" -Dlibudev=disabled ;; + --enable-libusb) printf "%s" -Dlibusb=enabled ;; + --disable-libusb) printf "%s" -Dlibusb=disabled ;; + --enable-libxml2) printf "%s" -Dlibxml2=enabled ;; + --disable-libxml2) printf "%s" -Dlibxml2=disabled ;; + --enable-linux-aio) printf "%s" -Dlinux_aio=enabled ;; + --disable-linux-aio) printf "%s" -Dlinux_aio=disabled ;; + --enable-linux-io-uring) printf "%s" -Dlinux_io_uring=enabled ;; + --disable-linux-io-uring) printf "%s" -Dlinux_io_uring=disabled ;; + --enable-lzfse) printf "%s" -Dlzfse=enabled ;; + --disable-lzfse) printf "%s" -Dlzfse=disabled ;; + --enable-lzo) printf "%s" -Dlzo=enabled ;; + --disable-lzo) printf "%s" -Dlzo=disabled ;; + --enable-malloc=*) quote_sh "-Dmalloc=$2" ;; + --enable-malloc-trim) printf "%s" -Dmalloc_trim=enabled ;; + --disable-malloc-trim) printf "%s" -Dmalloc_trim=disabled ;; + --enable-mpath) printf "%s" -Dmpath=enabled ;; + --disable-mpath) printf "%s" -Dmpath=disabled ;; + --enable-multiprocess) printf "%s" -Dmultiprocess=enabled ;; + --disable-multiprocess) printf "%s" -Dmultiprocess=disabled ;; + --enable-netmap) printf "%s" -Dnetmap=enabled ;; + --disable-netmap) printf "%s" -Dnetmap=disabled ;; + --enable-nettle) printf "%s" -Dnettle=enabled ;; + --disable-nettle) printf 
"%s" -Dnettle=disabled ;; + --enable-nvmm) printf "%s" -Dnvmm=enabled ;; + --disable-nvmm) printf "%s" -Dnvmm=disabled ;; + --enable-oss) printf "%s" -Doss=enabled ;; + --disable-oss) printf "%s" -Doss=disabled ;; + --enable-pa) printf "%s" -Dpa=enabled ;; + --disable-pa) printf "%s" -Dpa=disabled ;; + --enable-rbd) printf "%s" -Drbd=enabled ;; + --disable-rbd) printf "%s" -Drbd=disabled ;; + --enable-sdl) printf "%s" -Dsdl=enabled ;; + --disable-sdl) printf "%s" -Dsdl=disabled ;; + --enable-sdl-image) printf "%s" -Dsdl_image=enabled ;; + --disable-sdl-image) printf "%s" -Dsdl_image=disabled ;; + --enable-seccomp) printf "%s" -Dseccomp=enabled ;; + --disable-seccomp) printf "%s" -Dseccomp=disabled ;; + --enable-slirp) printf "%s" -Dslirp=enabled ;; + --disable-slirp) printf "%s" -Dslirp=disabled ;; + --enable-slirp=*) quote_sh "-Dslirp=$2" ;; + --enable-smartcard) printf "%s" -Dsmartcard=enabled ;; + --disable-smartcard) printf "%s" -Dsmartcard=disabled ;; + --enable-snappy) printf "%s" -Dsnappy=enabled ;; + --disable-snappy) printf "%s" -Dsnappy=disabled ;; + --enable-sparse) printf "%s" -Dsparse=enabled ;; + --disable-sparse) printf "%s" -Dsparse=disabled ;; + --enable-spice) printf "%s" -Dspice=enabled ;; + --disable-spice) printf "%s" -Dspice=disabled ;; + --enable-spice-protocol) printf "%s" -Dspice_protocol=enabled ;; + --disable-spice-protocol) printf "%s" -Dspice_protocol=disabled ;; + --enable-tcg) printf "%s" -Dtcg=enabled ;; + --disable-tcg) printf "%s" -Dtcg=disabled ;; + --enable-tcg-interpreter) printf "%s" -Dtcg_interpreter=true ;; + --disable-tcg-interpreter) printf "%s" -Dtcg_interpreter=false ;; + --enable-trace-backends=*) quote_sh "-Dtrace_backends=$2" ;; + --enable-u2f) printf "%s" -Du2f=enabled ;; + --disable-u2f) printf "%s" -Du2f=disabled ;; + --enable-usb-redir) printf "%s" -Dusb_redir=enabled ;; + --disable-usb-redir) printf "%s" -Dusb_redir=disabled ;; + --enable-vde) printf "%s" -Dvde=enabled ;; + --disable-vde) printf "%s" -Dvde=disabled ;; + --enable-vhost-user-blk-server) printf "%s" -Dvhost_user_blk_server=enabled ;; + --disable-vhost-user-blk-server) printf "%s" -Dvhost_user_blk_server=disabled ;; + --enable-virglrenderer) printf "%s" -Dvirglrenderer=enabled ;; + --disable-virglrenderer) printf "%s" -Dvirglrenderer=disabled ;; + --enable-virtfs) printf "%s" -Dvirtfs=enabled ;; + --disable-virtfs) printf "%s" -Dvirtfs=disabled ;; + --enable-virtiofsd) printf "%s" -Dvirtiofsd=enabled ;; + --disable-virtiofsd) printf "%s" -Dvirtiofsd=disabled ;; + --enable-vnc) printf "%s" -Dvnc=enabled ;; + --disable-vnc) printf "%s" -Dvnc=disabled ;; + --enable-vnc-jpeg) printf "%s" -Dvnc_jpeg=enabled ;; + --disable-vnc-jpeg) printf "%s" -Dvnc_jpeg=disabled ;; + --enable-vnc-png) printf "%s" -Dvnc_png=enabled ;; + --disable-vnc-png) printf "%s" -Dvnc_png=disabled ;; + --enable-vnc-sasl) printf "%s" -Dvnc_sasl=enabled ;; + --disable-vnc-sasl) printf "%s" -Dvnc_sasl=disabled ;; + --enable-vte) printf "%s" -Dvte=enabled ;; + --disable-vte) printf "%s" -Dvte=disabled ;; + --enable-whpx) printf "%s" -Dwhpx=enabled ;; + --disable-whpx) printf "%s" -Dwhpx=disabled ;; + --enable-xen) printf "%s" -Dxen=enabled ;; + --disable-xen) printf "%s" -Dxen=disabled ;; + --enable-xen-pci-passthrough) printf "%s" -Dxen_pci_passthrough=enabled ;; + --disable-xen-pci-passthrough) printf "%s" -Dxen_pci_passthrough=disabled ;; + --enable-xkbcommon) printf "%s" -Dxkbcommon=enabled ;; + --disable-xkbcommon) printf "%s" -Dxkbcommon=disabled ;; + --enable-zstd) printf "%s" -Dzstd=enabled ;; + 
--disable-zstd) printf "%s" -Dzstd=disabled ;; + *) return 1 ;; + esac +} diff --git a/scripts/meson.build b/scripts/meson.build index e8cc63896d..1c89e10a76 100644 --- a/scripts/meson.build +++ b/scripts/meson.build @@ -1,3 +1,3 @@ -if 'CONFIG_TRACE_SYSTEMTAP' in config_host +if stap.found() install_data('qemu-trace-stap', install_dir: get_option('bindir')) endif diff --git a/scripts/mtest2make.py b/scripts/mtest2make.py index ee072c0502..02c0453e67 100644 --- a/scripts/mtest2make.py +++ b/scripts/mtest2make.py @@ -60,11 +60,8 @@ def process_tests(test, targets, suites): if test['workdir'] is not None: print('.test.dir.%d := %s' % (i, shlex.quote(test['workdir']))) - if 'depends' in test: - deps = (targets.get(x, []) for x in test['depends']) - deps = itertools.chain.from_iterable(deps) - else: - deps = ['all'] + deps = (targets.get(x, []) for x in test['depends']) + deps = itertools.chain.from_iterable(deps) print('.test.name.%d := %s' % (i, test['name'])) print('.test.driver.%d := %s' % (i, driver)) diff --git a/scripts/qapi/commands.py b/scripts/qapi/commands.py index 3654825968..21001bbd6b 100644 --- a/scripts/qapi/commands.py +++ b/scripts/qapi/commands.py @@ -26,6 +26,7 @@ from .gen import ( QAPISchemaModularCVisitor, build_params, ifcontext, + gen_special_features, ) from .schema import ( QAPISchema, @@ -217,9 +218,6 @@ def gen_register_command(name: str, coroutine: bool) -> str: options = [] - if 'deprecated' in [f.name for f in features]: - options += ['QCO_DEPRECATED'] - if not success_response: options += ['QCO_NO_SUCCESS_RESP'] if allow_oob: @@ -229,15 +227,13 @@ def gen_register_command(name: str, if coroutine: options += ['QCO_COROUTINE'] - if not options: - options = ['QCO_NO_OPTIONS'] - ret = mcgen(''' qmp_register_command(cmds, "%(name)s", - qmp_marshal_%(c_name)s, %(opts)s); + qmp_marshal_%(c_name)s, %(opts)s, %(feats)s); ''', name=name, c_name=c_name(name), - opts=" | ".join(options)) + opts=' | '.join(options) or 0, + feats=gen_special_features(features)) return ret diff --git a/scripts/qapi/events.py b/scripts/qapi/events.py index 82475e84ec..27b44c49f5 100644 --- a/scripts/qapi/events.py +++ b/scripts/qapi/events.py @@ -109,13 +109,15 @@ def gen_event_send(name: str, if not boxed: ret += gen_param_var(arg_type) - if 'deprecated' in [f.name for f in features]: - ret += mcgen(''' + for f in features: + if f.is_special(): + ret += mcgen(''' - if (compat_policy.deprecated_output == COMPAT_POLICY_OUTPUT_HIDE) { + if (compat_policy.%(feat)s_output == COMPAT_POLICY_OUTPUT_HIDE) { return; } -''') +''', + feat=f.name) ret += mcgen(''' diff --git a/scripts/qapi/expr.py b/scripts/qapi/expr.py index 819ea6ad97..3cb389e875 100644 --- a/scripts/qapi/expr.py +++ b/scripts/qapi/expr.py @@ -472,7 +472,7 @@ def check_enum(expr: _JSONObject, info: QAPISourceInfo) -> None: for m in members] for member in members: source = "'data' member" - check_keys(member, info, source, ['name'], ['if']) + check_keys(member, info, source, ['name'], ['if', 'features']) member_name = member['name'] check_name_is_str(member_name, info, source) source = "%s '%s'" % (source, member_name) @@ -483,6 +483,7 @@ def check_enum(expr: _JSONObject, info: QAPISourceInfo) -> None: permit_upper=permissive, permit_underscore=permissive) check_if(member, info, source) + check_features(member.get('features'), info) def check_struct(expr: _JSONObject, info: QAPISourceInfo) -> None: diff --git a/scripts/qapi/gen.py b/scripts/qapi/gen.py index ab26d5c937..995a97d2b8 100644 --- a/scripts/qapi/gen.py +++ 
b/scripts/qapi/gen.py @@ -18,6 +18,7 @@ from typing import ( Dict, Iterator, Optional, + Sequence, Tuple, ) @@ -29,6 +30,7 @@ from .common import ( mcgen, ) from .schema import ( + QAPISchemaFeature, QAPISchemaIfCond, QAPISchemaModule, QAPISchemaObjectType, @@ -37,6 +39,12 @@ from .schema import ( from .source import QAPISourceInfo +def gen_special_features(features: Sequence[QAPISchemaFeature]) -> str: + special_features = [f"1u << QAPI_{feat.name.upper()}" + for feat in features if feat.is_special()] + return ' | '.join(special_features) or '0' + + class QAPIGen: def __init__(self, fname: str): self.fname = fname @@ -296,10 +304,9 @@ class QAPISchemaModularCVisitor(QAPISchemaVisitor): self._current_module = old_module def write(self, output_dir: str, opt_builtins: bool = False) -> None: - for name in self._module: + for name, (genc, genh) in self._module.items(): if QAPISchemaModule.is_builtin_module(name) and not opt_builtins: continue - (genc, genh) = self._module[name] genc.write(output_dir) genh.write(output_dir) diff --git a/scripts/qapi/introspect.py b/scripts/qapi/introspect.py index 4c079ee627..67c7d89aae 100644 --- a/scripts/qapi/introspect.py +++ b/scripts/qapi/introspect.py @@ -68,6 +68,7 @@ JSONValue = Union[_Value, 'Annotated[_Value]'] # TypedDict constructs, so they are broadly typed here as simple # Python Dicts. SchemaInfo = Dict[str, object] +SchemaInfoEnumMember = Dict[str, object] SchemaInfoObject = Dict[str, object] SchemaInfoObjectVariant = Dict[str, object] SchemaInfoObjectMember = Dict[str, object] @@ -274,8 +275,17 @@ const QLitObject %(c_name)s = %(c_string)s; obj['features'] = self._gen_features(features) self._trees.append(Annotated(obj, ifcond, comment)) - def _gen_member(self, member: QAPISchemaObjectTypeMember - ) -> Annotated[SchemaInfoObjectMember]: + def _gen_enum_member(self, member: QAPISchemaEnumMember + ) -> Annotated[SchemaInfoEnumMember]: + obj: SchemaInfoEnumMember = { + 'name': member.name, + } + if member.features: + obj['features'] = self._gen_features(member.features) + return Annotated(obj, member.ifcond) + + def _gen_object_member(self, member: QAPISchemaObjectTypeMember + ) -> Annotated[SchemaInfoObjectMember]: obj: SchemaInfoObjectMember = { 'name': member.name, 'type': self._use_type(member.type) @@ -305,7 +315,8 @@ const QLitObject %(c_name)s = %(c_string)s; prefix: Optional[str]) -> None: self._gen_tree( name, 'enum', - {'values': [Annotated(m.name, m.ifcond) for m in members]}, + {'members': [self._gen_enum_member(m) for m in members], + 'values': [Annotated(m.name, m.ifcond) for m in members]}, ifcond, features ) @@ -322,7 +333,7 @@ const QLitObject %(c_name)s = %(c_string)s; members: List[QAPISchemaObjectTypeMember], variants: Optional[QAPISchemaVariants]) -> None: obj: SchemaInfoObject = { - 'members': [self._gen_member(m) for m in members] + 'members': [self._gen_object_member(m) for m in members] } if variants: obj['tag'] = variants.tag_member.name diff --git a/scripts/qapi/mypy.ini b/scripts/qapi/mypy.ini index 54ca4483d6..6625356429 100644 --- a/scripts/qapi/mypy.ini +++ b/scripts/qapi/mypy.ini @@ -3,11 +3,6 @@ strict = True disallow_untyped_calls = False python_version = 3.6 -[mypy-qapi.parser] -disallow_untyped_defs = False -disallow_incomplete_defs = False -check_untyped_defs = False - [mypy-qapi.schema] disallow_untyped_defs = False disallow_incomplete_defs = False diff --git a/scripts/qapi/parser.py b/scripts/qapi/parser.py index f03ba2cfec..1b006cdc13 100644 --- a/scripts/qapi/parser.py +++ b/scripts/qapi/parser.py @@ -18,6 
+18,7 @@ from collections import OrderedDict import os import re from typing import ( + TYPE_CHECKING, Dict, List, Optional, @@ -30,9 +31,22 @@ from .error import QAPISemError, QAPISourceError from .source import QAPISourceInfo +if TYPE_CHECKING: + # pylint: disable=cyclic-import + # TODO: Remove cycle. [schema -> expr -> parser -> schema] + from .schema import QAPISchemaFeature, QAPISchemaMember + + +#: Represents a single Top Level QAPI schema expression. +TopLevelExpr = Dict[str, object] + # Return value alias for get_expr(). _ExprValue = Union[List[object], Dict[str, object], str, bool] +# FIXME: Consolidate and centralize definitions for TopLevelExpr, +# _ExprValue, _JSONValue, and _JSONObject; currently scattered across +# several modules. + class QAPIParseError(QAPISourceError): """Error class for all QAPI schema parsing errors.""" @@ -447,7 +461,10 @@ class QAPIDoc: """ class Section: - def __init__(self, parser, name=None, indent=0): + # pylint: disable=too-few-public-methods + def __init__(self, parser: QAPISchemaParser, + name: Optional[str] = None, indent: int = 0): + # parser, for error messages about indentation self._parser = parser # optional section name (argument/member or section name) @@ -456,7 +473,7 @@ class QAPIDoc: # the expected indent level of the text of this section self._indent = indent - def append(self, line): + def append(self, line: str) -> None: # Strip leading spaces corresponding to the expected indent level # Blank lines are always OK. if line: @@ -471,39 +488,47 @@ class QAPIDoc: self.text += line.rstrip() + '\n' class ArgSection(Section): - def __init__(self, parser, name, indent=0): + def __init__(self, parser: QAPISchemaParser, + name: str, indent: int = 0): super().__init__(parser, name, indent) - self.member = None + self.member: Optional['QAPISchemaMember'] = None - def connect(self, member): + def connect(self, member: 'QAPISchemaMember') -> None: self.member = member - def __init__(self, parser, info): + class NullSection(Section): + """ + Immutable dummy section for use at the end of a doc block. + """ + # pylint: disable=too-few-public-methods + def append(self, line: str) -> None: + assert False, "Text appended after end_comment() called." + + def __init__(self, parser: QAPISchemaParser, info: QAPISourceInfo): # self._parser is used to report errors with QAPIParseError. The # resulting error position depends on the state of the parser. # It happens to be the beginning of the comment. More or less # servicable, but action at a distance. self._parser = parser self.info = info - self.symbol = None + self.symbol: Optional[str] = None self.body = QAPIDoc.Section(parser) - # dict mapping parameter name to ArgSection - self.args = OrderedDict() - self.features = OrderedDict() - # a list of Section - self.sections = [] + # dicts mapping parameter/feature names to their ArgSection + self.args: Dict[str, QAPIDoc.ArgSection] = OrderedDict() + self.features: Dict[str, QAPIDoc.ArgSection] = OrderedDict() + self.sections: List[QAPIDoc.Section] = [] # the current section self._section = self.body self._append_line = self._append_body_line - def has_section(self, name): + def has_section(self, name: str) -> bool: """Return True if we have a section with this name.""" for i in self.sections: if i.name == name: return True return False - def append(self, line): + def append(self, line: str) -> None: """ Parse a comment line and add it to the documentation. 
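The indentation rule that Section.append documents above can be pictured
with a stand-alone sketch (a hypothetical helper, not the parser's own
code):

    def strip_section_indent(line: str, indent: int) -> str:
        # Blank lines are always OK; any other line must carry at least
        # 'indent' leading spaces, which are removed before storing it.
        if not line:
            return line
        if len(line) - len(line.lstrip(' ')) < indent:
            raise ValueError('unexpected de-indent')
        return line[indent:]

    # strip_section_indent('    second line of a description', 4)
    #   -> 'second line of a description'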
@@ -524,18 +549,18 @@ class QAPIDoc: line = line[1:] self._append_line(line) - def end_comment(self): - self._end_section() + def end_comment(self) -> None: + self._switch_section(QAPIDoc.NullSection(self._parser)) @staticmethod - def _is_section_tag(name): + def _is_section_tag(name: str) -> bool: return name in ('Returns:', 'Since:', # those are often singular or plural 'Note:', 'Notes:', 'Example:', 'Examples:', 'TODO:') - def _append_body_line(self, line): + def _append_body_line(self, line: str) -> None: """ Process a line of documentation text in the body section. @@ -556,9 +581,11 @@ class QAPIDoc: if not line.endswith(':'): raise QAPIParseError(self._parser, "line should end with ':'") self.symbol = line[1:-1] - # FIXME invalid names other than the empty string aren't flagged + # Invalid names are not checked here, but the name provided MUST + # match the following definition, which *is* validated in expr.py. if not self.symbol: - raise QAPIParseError(self._parser, "invalid name") + raise QAPIParseError( + self._parser, "name required after '@'") elif self.symbol: # This is a definition documentation block if name.startswith('@') and name.endswith(':'): @@ -575,7 +602,7 @@ class QAPIDoc: # This is a free-form documentation block self._append_freeform(line) - def _append_args_line(self, line): + def _append_args_line(self, line: str) -> None: """ Process a line of documentation text in an argument section. @@ -621,7 +648,7 @@ class QAPIDoc: self._append_freeform(line) - def _append_features_line(self, line): + def _append_features_line(self, line: str) -> None: name = line.split(' ', 1)[0] if name.startswith('@') and name.endswith(':'): @@ -653,7 +680,7 @@ class QAPIDoc: self._append_freeform(line) - def _append_various_line(self, line): + def _append_various_line(self, line: str) -> None: """ Process a line of documentation text in an additional section. 
@@ -689,7 +716,11 @@ class QAPIDoc: self._append_freeform(line) - def _start_symbol_section(self, symbols_dict, name, indent): + def _start_symbol_section( + self, + symbols_dict: Dict[str, 'QAPIDoc.ArgSection'], + name: str, + indent: int) -> None: # FIXME invalid names other than the empty string aren't flagged if not name: raise QAPIParseError(self._parser, "invalid parameter name") @@ -697,34 +728,41 @@ class QAPIDoc: raise QAPIParseError(self._parser, "'%s' parameter name duplicated" % name) assert not self.sections - self._end_section() - self._section = QAPIDoc.ArgSection(self._parser, name, indent) - symbols_dict[name] = self._section + new_section = QAPIDoc.ArgSection(self._parser, name, indent) + self._switch_section(new_section) + symbols_dict[name] = new_section - def _start_args_section(self, name, indent): + def _start_args_section(self, name: str, indent: int) -> None: self._start_symbol_section(self.args, name, indent) - def _start_features_section(self, name, indent): + def _start_features_section(self, name: str, indent: int) -> None: self._start_symbol_section(self.features, name, indent) - def _start_section(self, name=None, indent=0): + def _start_section(self, name: Optional[str] = None, + indent: int = 0) -> None: if name in ('Returns', 'Since') and self.has_section(name): raise QAPIParseError(self._parser, "duplicated '%s' section" % name) - self._end_section() - self._section = QAPIDoc.Section(self._parser, name, indent) - self.sections.append(self._section) + new_section = QAPIDoc.Section(self._parser, name, indent) + self._switch_section(new_section) + self.sections.append(new_section) - def _end_section(self): - if self._section: - text = self._section.text = self._section.text.strip() - if self._section.name and (not text or text.isspace()): - raise QAPIParseError( - self._parser, - "empty doc section '%s'" % self._section.name) - self._section = None + def _switch_section(self, new_section: 'QAPIDoc.Section') -> None: + text = self._section.text = self._section.text.strip() - def _append_freeform(self, line): + # Only the 'body' section is allowed to have an empty body. + # All other sections, including anonymous ones, must have text. + if self._section != self.body and not text: + # We do not create anonymous sections unless there is + # something to put in them; this is a parser bug. 
+ assert self._section.name + raise QAPIParseError( + self._parser, + "empty doc section '%s'" % self._section.name) + + self._section = new_section + + def _append_freeform(self, line: str) -> None: match = re.match(r'(@\S+:)', line) if match: raise QAPIParseError(self._parser, @@ -732,37 +770,41 @@ class QAPIDoc: % match.group(1)) self._section.append(line) - def connect_member(self, member): + def connect_member(self, member: 'QAPISchemaMember') -> None: if member.name not in self.args: # Undocumented TODO outlaw self.args[member.name] = QAPIDoc.ArgSection(self._parser, member.name) self.args[member.name].connect(member) - def connect_feature(self, feature): + def connect_feature(self, feature: 'QAPISchemaFeature') -> None: if feature.name not in self.features: raise QAPISemError(feature.info, "feature '%s' lacks documentation" % feature.name) self.features[feature.name].connect(feature) - def check_expr(self, expr): + def check_expr(self, expr: TopLevelExpr) -> None: if self.has_section('Returns') and 'command' not in expr: raise QAPISemError(self.info, "'Returns:' is only valid for commands") - def check(self): + def check(self) -> None: - def check_args_section(args, info, what): + def check_args_section( + args: Dict[str, QAPIDoc.ArgSection], what: str + ) -> None: bogus = [name for name, section in args.items() if not section.member] if bogus: raise QAPISemError( self.info, - "documented member%s '%s' %s not exist" - % ("s" if len(bogus) > 1 else "", - "', '".join(bogus), - "do" if len(bogus) > 1 else "does")) + "documented %s%s '%s' %s not exist" % ( + what, + "s" if len(bogus) > 1 else "", + "', '".join(bogus), + "do" if len(bogus) > 1 else "does" + )) - check_args_section(self.args, self.info, 'members') - check_args_section(self.features, self.info, 'features') + check_args_section(self.args, 'member') + check_args_section(self.features, 'feature') diff --git a/scripts/qapi/pylintrc b/scripts/qapi/pylintrc index c5275d5f59..b259531a72 100644 --- a/scripts/qapi/pylintrc +++ b/scripts/qapi/pylintrc @@ -2,8 +2,7 @@ # Add files or directories matching the regex patterns to the ignore list. # The regex matches against base names, not paths. 
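
A minimal standalone sketch (not part of the patch) of the pluralized error text now built by check_args_section() in QAPIDoc.check() above; the member and feature names here are made up:

# Sketch only: mirrors the message construction in QAPIDoc.check().
def bogus_message(what, bogus):
    return "documented %s%s '%s' %s not exist" % (
        what,
        "s" if len(bogus) > 1 else "",
        "', '".join(bogus),
        "do" if len(bogus) > 1 else "does",
    )

print(bogus_message('member', ['foo']))      # documented member 'foo' does not exist
print(bogus_message('feature', ['a', 'b']))  # documented features 'a', 'b' do not exist
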
-ignore-patterns=parser.py, - schema.py, +ignore-patterns=schema.py, [MESSAGES CONTROL] @@ -23,6 +22,7 @@ disable=fixme, too-many-branches, too-many-statements, too-many-instance-attributes, + consider-using-f-string, [REPORTS] diff --git a/scripts/qapi/schema.py b/scripts/qapi/schema.py index 004d7095ff..b7b3fc0ce4 100644 --- a/scripts/qapi/schema.py +++ b/scripts/qapi/schema.py @@ -254,9 +254,11 @@ class QAPISchemaType(QAPISchemaEntity): def check(self, schema): QAPISchemaEntity.check(self, schema) - if 'deprecated' in [f.name for f in self.features]: - raise QAPISemError( - self.info, "feature 'deprecated' is not supported for types") + for feat in self.features: + if feat.is_special(): + raise QAPISemError( + self.info, + f"feature '{feat.name}' is not supported for types") def describe(self): assert self.meta @@ -708,10 +710,26 @@ class QAPISchemaMember: class QAPISchemaEnumMember(QAPISchemaMember): role = 'value' + def __init__(self, name, info, ifcond=None, features=None): + super().__init__(name, info, ifcond) + for f in features or []: + assert isinstance(f, QAPISchemaFeature) + f.set_defined_in(name) + self.features = features or [] + + def connect_doc(self, doc): + super().connect_doc(doc) + if doc: + for f in self.features: + doc.connect_feature(f) + class QAPISchemaFeature(QAPISchemaMember): role = 'feature' + def is_special(self): + return self.name in ('deprecated', 'unstable') + class QAPISchemaObjectTypeMember(QAPISchemaMember): def __init__(self, name, info, typ, optional, ifcond=None, features=None): @@ -980,9 +998,14 @@ class QAPISchema: QAPISchemaIfCond(f.get('if'))) for f in features] + def _make_enum_member(self, name, ifcond, features, info): + return QAPISchemaEnumMember(name, info, + QAPISchemaIfCond(ifcond), + self._make_features(features, info)) + def _make_enum_members(self, values, info): - return [QAPISchemaEnumMember(v['name'], info, - QAPISchemaIfCond(v.get('if'))) + return [self._make_enum_member(v['name'], v.get('if'), + v.get('features'), info) for v in values] def _make_array_type(self, element_type, info): diff --git a/scripts/qapi/types.py b/scripts/qapi/types.py index 831294fe42..3013329c24 100644 --- a/scripts/qapi/types.py +++ b/scripts/qapi/types.py @@ -16,7 +16,7 @@ This work is licensed under the terms of the GNU GPL, version 2. 
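
A minimal standalone sketch (not part of the patch) of how the new QAPISchemaFeature.is_special() and the gen_special_features() helper shown above combine into a C bitmask expression; Feature is a stand-in for QAPISchemaFeature, and the feature names are examples:

# Sketch only: stand-in for QAPISchemaFeature plus the gen.py helper.
from dataclasses import dataclass
from typing import Sequence

@dataclass
class Feature:
    name: str

    def is_special(self) -> bool:
        # mirrors QAPISchemaFeature.is_special() above
        return self.name in ('deprecated', 'unstable')

def gen_special_features(features: Sequence[Feature]) -> str:
    special = [f"1u << QAPI_{f.name.upper()}"
               for f in features if f.is_special()]
    return ' | '.join(special) or '0'

print(gen_special_features([Feature('deprecated'), Feature('unstable')]))
# 1u << QAPI_DEPRECATED | 1u << QAPI_UNSTABLE
print(gen_special_features([Feature('allow-write-only-overlay')]))
# 0
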
from typing import List, Optional from .common import c_enum_const, c_name, mcgen -from .gen import QAPISchemaModularCVisitor, ifcontext +from .gen import QAPISchemaModularCVisitor, gen_special_features, ifcontext from .schema import ( QAPISchema, QAPISchemaEnumMember, @@ -38,6 +38,8 @@ objects_seen = set() def gen_enum_lookup(name: str, members: List[QAPISchemaEnumMember], prefix: Optional[str] = None) -> str: + max_index = c_enum_const(name, '_MAX', prefix) + feats = '' ret = mcgen(''' const QEnumLookup %(c_name)s_lookup = { @@ -53,12 +55,27 @@ const QEnumLookup %(c_name)s_lookup = { index=index, name=memb.name) ret += memb.ifcond.gen_endif() + special_features = gen_special_features(memb.features) + if special_features != '0': + feats += mcgen(''' + [%(index)s] = %(special_features)s, +''', + index=index, special_features=special_features) + + if feats: + ret += mcgen(''' + }, + .special_features = (const unsigned char[%(max_index)s]) { +''', + max_index=max_index) + ret += feats + ret += mcgen(''' }, .size = %(max_index)s }; ''', - max_index=c_enum_const(name, '_MAX', prefix)) + max_index=max_index) return ret diff --git a/scripts/qapi/visit.py b/scripts/qapi/visit.py index 9d9196a143..e13bbe4292 100644 --- a/scripts/qapi/visit.py +++ b/scripts/qapi/visit.py @@ -21,7 +21,7 @@ from .common import ( indent, mcgen, ) -from .gen import QAPISchemaModularCVisitor, ifcontext +from .gen import QAPISchemaModularCVisitor, gen_special_features, ifcontext from .schema import ( QAPISchema, QAPISchemaEnumMember, @@ -76,7 +76,6 @@ bool visit_type_%(c_name)s_members(Visitor *v, %(c_name)s *obj, Error **errp) c_type=base.c_name()) for memb in members: - deprecated = 'deprecated' in [f.name for f in memb.features] ret += memb.ifcond.gen_if() if memb.optional: ret += mcgen(''' @@ -84,14 +83,15 @@ bool visit_type_%(c_name)s_members(Visitor *v, %(c_name)s *obj, Error **errp) ''', name=memb.name, c_name=c_name(memb.name)) indent.increase() - if deprecated: + special_features = gen_special_features(memb.features) + if special_features != '0': ret += mcgen(''' - if (!visit_deprecated_accept(v, "%(name)s", errp)) { + if (visit_policy_reject(v, "%(name)s", %(special_features)s, errp)) { return false; } - if (visit_deprecated(v, "%(name)s")) { + if (!visit_policy_skip(v, "%(name)s", %(special_features)s)) { ''', - name=memb.name) + name=memb.name, special_features=special_features) indent.increase() ret += mcgen(''' if (!visit_type_%(c_type)s(v, "%(name)s", &obj->%(c_name)s, errp)) { @@ -100,7 +100,7 @@ bool visit_type_%(c_name)s_members(Visitor *v, %(c_name)s *obj, Error **errp) ''', c_type=memb.type.c_name(), name=memb.name, c_name=c_name(memb.name)) - if deprecated: + if special_features != '0': indent.decrease() ret += mcgen(''' } diff --git a/scripts/simplebench/bench_block_job.py b/scripts/simplebench/bench_block_job.py index 4f03c12169..a403c35b08 100755 --- a/scripts/simplebench/bench_block_job.py +++ b/scripts/simplebench/bench_block_job.py @@ -28,6 +28,7 @@ import json sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..', 'python')) from qemu.machine import QEMUMachine from qemu.qmp import QMPConnectError +from qemu.aqmp import ConnectError def bench_block_job(cmd, cmd_args, qemu_args): @@ -49,7 +50,7 @@ def bench_block_job(cmd, cmd_args, qemu_args): vm.launch() except OSError as e: return {'error': 'popen failed: ' + str(e)} - except (QMPConnectError, socket.timeout): + except (QMPConnectError, ConnectError, socket.timeout): return {'error': 'qemu failed: ' + str(vm.get_log())} try: diff 
--git a/semihosting/arm-compat-semi.c b/semihosting/arm-compat-semi.c index 01badea99c..37963becae 100644 --- a/semihosting/arm-compat-semi.c +++ b/semihosting/arm-compat-semi.c @@ -775,7 +775,7 @@ static inline bool is_64bit_semihosting(CPUArchState *env) #if defined(TARGET_ARM) return is_a64(env); #elif defined(TARGET_RISCV) - return !riscv_cpu_is_32bit(env); + return riscv_cpu_mxl(env) != MXL_RV32; #else #error un-handled architecture #endif diff --git a/softmmu/device_tree.c b/softmmu/device_tree.c index b621f63fba..3965c834ca 100644 --- a/softmmu/device_tree.c +++ b/softmmu/device_tree.c @@ -540,8 +540,8 @@ int qemu_fdt_add_subnode(void *fdt, const char *name) retval = fdt_add_subnode(fdt, parent, basename); if (retval < 0) { - error_report("FDT: Failed to create subnode %s: %s", name, - fdt_strerror(retval)); + error_report("%s: Failed to create subnode %s: %s", + __func__, name, fdt_strerror(retval)); exit(1); } @@ -549,6 +549,46 @@ int qemu_fdt_add_subnode(void *fdt, const char *name) return retval; } +/* + * qemu_fdt_add_path: Like qemu_fdt_add_subnode(), but will add + * all missing subnodes from the given path. + */ +int qemu_fdt_add_path(void *fdt, const char *path) +{ + const char *name; + const char *p = path; + int namelen, retval; + int parent = 0; + + if (path[0] != '/') { + return -1; + } + + while (p) { + name = p + 1; + p = strchr(name, '/'); + namelen = p != NULL ? p - name : strlen(name); + + retval = fdt_subnode_offset_namelen(fdt, parent, name, namelen); + if (retval < 0 && retval != -FDT_ERR_NOTFOUND) { + error_report("%s: Unexpected error in finding subnode %.*s: %s", + __func__, namelen, name, fdt_strerror(retval)); + exit(1); + } else if (retval == -FDT_ERR_NOTFOUND) { + retval = fdt_add_subnode_namelen(fdt, parent, name, namelen); + if (retval < 0) { + error_report("%s: Failed to create subnode %.*s: %s", + __func__, namelen, name, fdt_strerror(retval)); + exit(1); + } + } + + parent = retval; + } + + return retval; +} + void qemu_fdt_dumpdtb(void *fdt, int size) { const char *dumpdtb = current_machine->dumpdtb; diff --git a/softmmu/memory.c b/softmmu/memory.c index bfedaf9c4d..7340e19ff5 100644 --- a/softmmu/memory.c +++ b/softmmu/memory.c @@ -39,7 +39,7 @@ static unsigned memory_region_transaction_depth; static bool memory_region_update_pending; static bool ioeventfd_update_pending; -bool global_dirty_log; +unsigned int global_dirty_tracking; static QTAILQ_HEAD(, MemoryListener) memory_listeners = QTAILQ_HEAD_INITIALIZER(memory_listeners); @@ -1378,17 +1378,17 @@ bool memory_region_access_valid(MemoryRegion *mr, { if (mr->ops->valid.accepts && !mr->ops->valid.accepts(mr->opaque, addr, size, is_write, attrs)) { - qemu_log_mask(LOG_GUEST_ERROR, "Invalid access at addr " - "0x%" HWADDR_PRIX ", size %u, " - "region '%s', reason: rejected\n", + qemu_log_mask(LOG_GUEST_ERROR, "Invalid %s at addr 0x%" HWADDR_PRIX + ", size %u, region '%s', reason: rejected\n", + is_write ? "write" : "read", addr, size, memory_region_name(mr)); return false; } if (!mr->ops->valid.unaligned && (addr & (size - 1))) { - qemu_log_mask(LOG_GUEST_ERROR, "Invalid access at addr " - "0x%" HWADDR_PRIX ", size %u, " - "region '%s', reason: unaligned\n", + qemu_log_mask(LOG_GUEST_ERROR, "Invalid %s at addr 0x%" HWADDR_PRIX + ", size %u, region '%s', reason: unaligned\n", + is_write ? 
"write" : "read", addr, size, memory_region_name(mr)); return false; } @@ -1400,10 +1400,10 @@ bool memory_region_access_valid(MemoryRegion *mr, if (size > mr->ops->valid.max_access_size || size < mr->ops->valid.min_access_size) { - qemu_log_mask(LOG_GUEST_ERROR, "Invalid access at addr " - "0x%" HWADDR_PRIX ", size %u, " - "region '%s', reason: invalid size " - "(min:%u max:%u)\n", + qemu_log_mask(LOG_GUEST_ERROR, "Invalid %s at addr 0x%" HWADDR_PRIX + ", size %u, region '%s', reason: invalid size " + "(min:%u max:%u)\n", + is_write ? "write" : "read", addr, size, memory_region_name(mr), mr->ops->valid.min_access_size, mr->ops->valid.max_access_size); @@ -1811,12 +1811,17 @@ bool memory_region_is_ram_device(MemoryRegion *mr) return mr->ram_device; } +bool memory_region_is_protected(MemoryRegion *mr) +{ + return mr->ram && (mr->ram_block->flags & RAM_PROTECTED); +} + uint8_t memory_region_get_dirty_log_mask(MemoryRegion *mr) { uint8_t mask = mr->dirty_log_mask; RAMBlock *rb = mr->ram_block; - if (global_dirty_log && ((rb && qemu_ram_is_migratable(rb)) || + if (global_dirty_tracking && ((rb && qemu_ram_is_migratable(rb)) || memory_region_is_iommu(mr))) { mask |= (1 << DIRTY_MEMORY_MIGRATION); } @@ -2076,6 +2081,17 @@ int ram_discard_manager_replay_populated(const RamDiscardManager *rdm, return rdmc->replay_populated(rdm, section, replay_fn, opaque); } +void ram_discard_manager_replay_discarded(const RamDiscardManager *rdm, + MemoryRegionSection *section, + ReplayRamDiscard replay_fn, + void *opaque) +{ + RamDiscardManagerClass *rdmc = RAM_DISCARD_MANAGER_GET_CLASS(rdm); + + g_assert(rdmc->replay_discarded); + rdmc->replay_discarded(rdm, section, replay_fn, opaque); +} + void ram_discard_manager_register_listener(RamDiscardManager *rdm, RamDiscardListener *rdl, MemoryRegionSection *section) @@ -2149,6 +2165,7 @@ static void memory_region_sync_dirty_bitmap(MemoryRegion *mr) } } flatview_unref(view); + trace_memory_region_sync_dirty(mr ? mr->name : "(all)", listener->name, 0); } else if (listener->log_sync_global) { /* * No matter whether MR is specified, what we can do here @@ -2156,6 +2173,7 @@ static void memory_region_sync_dirty_bitmap(MemoryRegion *mr) * sync in a finer granularity. */ listener->log_sync_global(listener); + trace_memory_region_sync_dirty(mr ? mr->name : "(all)", listener->name, 1); } } } @@ -2753,14 +2771,18 @@ void memory_global_after_dirty_log_sync(void) static VMChangeStateEntry *vmstate_change; -void memory_global_dirty_log_start(void) +void memory_global_dirty_log_start(unsigned int flags) { if (vmstate_change) { qemu_del_vm_change_state_handler(vmstate_change); vmstate_change = NULL; } - global_dirty_log = true; + assert(flags && !(flags & (~GLOBAL_DIRTY_MASK))); + assert(!(global_dirty_tracking & flags)); + global_dirty_tracking |= flags; + + trace_global_dirty_changed(global_dirty_tracking); MEMORY_LISTENER_CALL_GLOBAL(log_global_start, Forward); @@ -2770,9 +2792,13 @@ void memory_global_dirty_log_start(void) memory_region_transaction_commit(); } -static void memory_global_dirty_log_do_stop(void) +static void memory_global_dirty_log_do_stop(unsigned int flags) { - global_dirty_log = false; + assert(flags && !(flags & (~GLOBAL_DIRTY_MASK))); + assert((global_dirty_tracking & flags) == flags); + global_dirty_tracking &= ~flags; + + trace_global_dirty_changed(global_dirty_tracking); /* Refresh DIRTY_MEMORY_MIGRATION bit. 
*/ memory_region_transaction_begin(); @@ -2785,8 +2811,9 @@ static void memory_global_dirty_log_do_stop(void) static void memory_vm_change_state_handler(void *opaque, bool running, RunState state) { + unsigned int flags = (unsigned int)(uintptr_t)opaque; if (running) { - memory_global_dirty_log_do_stop(); + memory_global_dirty_log_do_stop(flags); if (vmstate_change) { qemu_del_vm_change_state_handler(vmstate_change); @@ -2795,18 +2822,19 @@ static void memory_vm_change_state_handler(void *opaque, bool running, } } -void memory_global_dirty_log_stop(void) +void memory_global_dirty_log_stop(unsigned int flags) { if (!runstate_is_running()) { if (vmstate_change) { return; } vmstate_change = qemu_add_vm_change_state_handler( - memory_vm_change_state_handler, NULL); + memory_vm_change_state_handler, + (void *)(uintptr_t)flags); return; } - memory_global_dirty_log_do_stop(); + memory_global_dirty_log_do_stop(flags); } static void listener_add_address_space(MemoryListener *listener, @@ -2818,7 +2846,7 @@ static void listener_add_address_space(MemoryListener *listener, if (listener->begin) { listener->begin(listener); } - if (global_dirty_log) { + if (global_dirty_tracking) { if (listener->log_global_start) { listener->log_global_start(listener); } diff --git a/softmmu/memory_mapping.c b/softmmu/memory_mapping.c index e7af276546..a62eaa49cc 100644 --- a/softmmu/memory_mapping.c +++ b/softmmu/memory_mapping.c @@ -193,29 +193,14 @@ typedef struct GuestPhysListener { MemoryListener listener; } GuestPhysListener; -static void guest_phys_blocks_region_add(MemoryListener *listener, +static void guest_phys_block_add_section(GuestPhysListener *g, MemoryRegionSection *section) { - GuestPhysListener *g; - uint64_t section_size; - hwaddr target_start, target_end; - uint8_t *host_addr; - GuestPhysBlock *predecessor; - - /* we only care about RAM */ - if (!memory_region_is_ram(section->mr) || - memory_region_is_ram_device(section->mr) || - memory_region_is_nonvolatile(section->mr)) { - return; - } - - g = container_of(listener, GuestPhysListener, listener); - section_size = int128_get64(section->size); - target_start = section->offset_within_address_space; - target_end = target_start + section_size; - host_addr = memory_region_get_ram_ptr(section->mr) + - section->offset_within_region; - predecessor = NULL; + const hwaddr target_start = section->offset_within_address_space; + const hwaddr target_end = target_start + int128_get64(section->size); + uint8_t *host_addr = memory_region_get_ram_ptr(section->mr) + + section->offset_within_region; + GuestPhysBlock *predecessor = NULL; /* find continuity in guest physical address space */ if (!QTAILQ_EMPTY(&g->list->head)) { @@ -229,7 +214,8 @@ static void guest_phys_blocks_region_add(MemoryListener *listener, /* we want continuity in both guest-physical and host-virtual memory */ if (predecessor->target_end < target_start || - predecessor->host_addr + predecessor_size != host_addr) { + predecessor->host_addr + predecessor_size != host_addr || + predecessor->mr != section->mr) { predecessor = NULL; } } @@ -260,6 +246,40 @@ static void guest_phys_blocks_region_add(MemoryListener *listener, #endif } +static int guest_phys_ram_populate_cb(MemoryRegionSection *section, + void *opaque) +{ + GuestPhysListener *g = opaque; + + guest_phys_block_add_section(g, section); + return 0; +} + +static void guest_phys_blocks_region_add(MemoryListener *listener, + MemoryRegionSection *section) +{ + GuestPhysListener *g = container_of(listener, GuestPhysListener, listener); + + /* we 
only care about RAM */ + if (!memory_region_is_ram(section->mr) || + memory_region_is_ram_device(section->mr) || + memory_region_is_nonvolatile(section->mr)) { + return; + } + + /* for special sparse regions, only add populated parts */ + if (memory_region_has_ram_discard_manager(section->mr)) { + RamDiscardManager *rdm; + + rdm = memory_region_get_ram_discard_manager(section->mr); + ram_discard_manager_replay_populated(rdm, section, + guest_phys_ram_populate_cb, g); + return; + } + + guest_phys_block_add_section(g, section); +} + void guest_phys_blocks_append(GuestPhysBlockList *list) { GuestPhysListener g = { 0 }; diff --git a/softmmu/physmem.c b/softmmu/physmem.c index 23e77cb771..314f8b439c 100644 --- a/softmmu/physmem.c +++ b/softmmu/physmem.c @@ -756,6 +756,7 @@ void cpu_address_space_init(CPUState *cpu, int asidx, if (tcg_enabled()) { newas->tcg_as_listener.log_global_after_sync = tcg_log_global_after_sync; newas->tcg_as_listener.commit = tcg_commit; + newas->tcg_as_listener.name = "tcg"; memory_listener_register(&newas->tcg_as_listener, as); } } @@ -928,29 +929,26 @@ void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len, } wp->hitaddr = MAX(addr, wp->vaddr); wp->hitattrs = attrs; - if (!cpu->watchpoint_hit) { - if (wp->flags & BP_CPU && cc->tcg_ops->debug_check_watchpoint && - !cc->tcg_ops->debug_check_watchpoint(cpu, wp)) { - wp->flags &= ~BP_WATCHPOINT_HIT; - continue; - } - cpu->watchpoint_hit = wp; - mmap_lock(); - tb_check_watchpoint(cpu, ra); - if (wp->flags & BP_STOP_BEFORE_ACCESS) { - cpu->exception_index = EXCP_DEBUG; - mmap_unlock(); - cpu_loop_exit_restore(cpu, ra); - } else { - /* Force execution of one insn next time. */ - cpu->cflags_next_tb = 1 | curr_cflags(cpu); - mmap_unlock(); - if (ra) { - cpu_restore_state(cpu, ra, true); - } - cpu_loop_exit_noexc(cpu); - } + if (wp->flags & BP_CPU && cc->tcg_ops->debug_check_watchpoint && + !cc->tcg_ops->debug_check_watchpoint(cpu, wp)) { + wp->flags &= ~BP_WATCHPOINT_HIT; + continue; + } + cpu->watchpoint_hit = wp; + + mmap_lock(); + /* This call also restores vCPU state */ + tb_check_watchpoint(cpu, ra); + if (wp->flags & BP_STOP_BEFORE_ACCESS) { + cpu->exception_index = EXCP_DEBUG; + mmap_unlock(); + cpu_loop_exit(cpu); + } else { + /* Force execution of one insn next time. 
*/ + cpu->cflags_next_tb = 1 | CF_LAST_IO | curr_cflags(cpu); + mmap_unlock(); + cpu_loop_exit_noexc(cpu); } } else { wp->flags &= ~BP_WATCHPOINT_HIT; @@ -1298,23 +1296,26 @@ void qemu_mutex_unlock_ramlist(void) qemu_mutex_unlock(&ram_list.mutex); } -void ram_block_dump(Monitor *mon) +GString *ram_block_format(void) { RAMBlock *block; char *psize; + GString *buf = g_string_new(""); RCU_READ_LOCK_GUARD(); - monitor_printf(mon, "%24s %8s %18s %18s %18s\n", - "Block Name", "PSize", "Offset", "Used", "Total"); + g_string_append_printf(buf, "%24s %8s %18s %18s %18s\n", + "Block Name", "PSize", "Offset", "Used", "Total"); RAMBLOCK_FOREACH(block) { psize = size_to_str(block->page_size); - monitor_printf(mon, "%24s %8s 0x%016" PRIx64 " 0x%016" PRIx64 - " 0x%016" PRIx64 "\n", block->idstr, psize, - (uint64_t)block->offset, - (uint64_t)block->used_length, - (uint64_t)block->max_length); + g_string_append_printf(buf, "%24s %8s 0x%016" PRIx64 " 0x%016" PRIx64 + " 0x%016" PRIx64 "\n", block->idstr, psize, + (uint64_t)block->offset, + (uint64_t)block->used_length, + (uint64_t)block->max_length); g_free(psize); } + + return buf; } #ifdef __linux__ @@ -2055,7 +2056,8 @@ RAMBlock *qemu_ram_alloc_from_fd(ram_addr_t size, MemoryRegion *mr, int64_t file_size, file_align; /* Just support these ram flags by now. */ - assert((ram_flags & ~(RAM_SHARED | RAM_PMEM | RAM_NORESERVE)) == 0); + assert((ram_flags & ~(RAM_SHARED | RAM_PMEM | RAM_NORESERVE | + RAM_PROTECTED)) == 0); if (xen_enabled()) { error_setg(errp, "-mem-path not supported with Xen"); @@ -2631,7 +2633,7 @@ static void tcg_log_global_after_sync(MemoryListener *listener) * In record/replay mode this causes a deadlock, because * run_on_cpu waits for rr mutex. Therefore no races are possible * in this case and no need for making run_on_cpu when - * record/replay is not enabled. + * record/replay is enabled. */ cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener); run_on_cpu(cpuas->cpu, do_nothing, RUN_ON_CPU_NULL); diff --git a/softmmu/qdev-monitor.c b/softmmu/qdev-monitor.c index 0705f00846..b5aaae4b8c 100644 --- a/softmmu/qdev-monitor.c +++ b/softmmu/qdev-monitor.c @@ -28,6 +28,8 @@ #include "qapi/qmp/dispatch.h" #include "qapi/qmp/qdict.h" #include "qapi/qmp/qerror.h" +#include "qapi/qmp/qstring.h" +#include "qapi/qobject-input-visitor.h" #include "qemu/config-file.h" #include "qemu/error-report.h" #include "qemu/help_option.h" @@ -40,6 +42,7 @@ #include "qemu/cutils.h" #include "hw/qdev-properties.h" #include "hw/clock.h" +#include "hw/boards.h" /* * Aliases were a bad idea from the start. 
Let's keep them @@ -162,6 +165,7 @@ static void qdev_print_devinfos(bool show_no_user) [DEVICE_CATEGORY_SOUND] = "Sound", [DEVICE_CATEGORY_MISC] = "Misc", [DEVICE_CATEGORY_CPU] = "CPU", + [DEVICE_CATEGORY_WATCHDOG]= "Watchdog", [DEVICE_CATEGORY_MAX] = "Uncategorized", }; GSList *list, *elt; @@ -194,22 +198,6 @@ static void qdev_print_devinfos(bool show_no_user) g_slist_free(list); } -static int set_property(void *opaque, const char *name, const char *value, - Error **errp) -{ - Object *obj = opaque; - - if (strcmp(name, "driver") == 0) - return 0; - if (strcmp(name, "bus") == 0) - return 0; - - if (!object_property_parse(obj, name, value, errp)) { - return -1; - } - return 0; -} - static const char *find_typename_by_alias(const char *alias) { int i; @@ -268,6 +256,16 @@ static DeviceClass *qdev_get_device_class(const char **driver, Error **errp) return NULL; } + if (object_class_dynamic_cast(oc, TYPE_SYS_BUS_DEVICE)) { + /* sysbus devices need to be allowed by the machine */ + MachineClass *mc = MACHINE_CLASS(object_get_class(qdev_get_machine())); + if (!device_type_is_dynamic_sysbus(mc, *driver)) { + error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "driver", + "a dynamic sysbus device type for the machine"); + return NULL; + } + } + return dc; } @@ -578,32 +576,49 @@ static BusState *qbus_find(const char *path, Error **errp) return bus; } -void qdev_set_id(DeviceState *dev, const char *id) +/* Takes ownership of @id, will be freed when deleting the device */ +const char *qdev_set_id(DeviceState *dev, char *id, Error **errp) { - if (id) { - dev->id = id; - } + ObjectProperty *prop; - if (dev->id) { - object_property_add_child(qdev_get_peripheral(), dev->id, - OBJECT(dev)); + assert(!dev->id && !dev->realized); + + /* + * object_property_[try_]add_child() below will assert the device + * has no parent + */ + if (id) { + prop = object_property_try_add_child(qdev_get_peripheral(), id, + OBJECT(dev), NULL); + if (prop) { + dev->id = id; + } else { + g_free(id); + error_setg(errp, "Duplicate device ID '%s'", id); + return NULL; + } } else { static int anon_count; gchar *name = g_strdup_printf("device[%d]", anon_count++); - object_property_add_child(qdev_get_peripheral_anon(), name, - OBJECT(dev)); + prop = object_property_add_child(qdev_get_peripheral_anon(), name, + OBJECT(dev)); g_free(name); } + + return prop->name; } -DeviceState *qdev_device_add(QemuOpts *opts, Error **errp) +DeviceState *qdev_device_add_from_qdict(const QDict *opts, + bool from_json, Error **errp) { + ERRP_GUARD(); DeviceClass *dc; const char *driver, *path; + char *id; DeviceState *dev = NULL; BusState *bus = NULL; - driver = qemu_opt_get(opts, "driver"); + driver = qdict_get_try_str(opts, "driver"); if (!driver) { error_setg(errp, QERR_MISSING_PARAMETER, "driver"); return NULL; @@ -616,7 +631,7 @@ DeviceState *qdev_device_add(QemuOpts *opts, Error **errp) } /* find bus */ - path = qemu_opt_get(opts, "bus"); + path = qdict_get_try_str(opts, "bus"); if (path != NULL) { bus = qbus_find(path, errp); if (!bus) { @@ -636,17 +651,13 @@ DeviceState *qdev_device_add(QemuOpts *opts, Error **errp) } } - if (qemu_opt_get(opts, "failover_pair_id")) { - if (!opts->id) { - error_setg(errp, "Device with failover_pair_id don't have id"); - return NULL; - } - if (qdev_should_hide_device(opts)) { - if (bus && !qbus_is_hotpluggable(bus)) { - error_setg(errp, QERR_BUS_NO_HOTPLUG, bus->name); - } - return NULL; + if (qdev_should_hide_device(opts, from_json, errp)) { + if (bus && !qbus_is_hotpluggable(bus)) { + error_setg(errp, 
QERR_BUS_NO_HOTPLUG, bus->name); } + return NULL; + } else if (*errp) { + return NULL; } if (phase_check(PHASE_MACHINE_READY) && bus && !qbus_is_hotpluggable(bus)) { @@ -676,16 +687,28 @@ DeviceState *qdev_device_add(QemuOpts *opts, Error **errp) } } - qdev_set_id(dev, qemu_opts_id(opts)); - - /* set properties */ - if (qemu_opt_foreach(opts, set_property, dev, errp)) { + /* + * set dev's parent and register its id. + * If it fails it means the id is already taken. + */ + id = g_strdup(qdict_get_try_str(opts, "id")); + if (!qdev_set_id(dev, id, errp)) { + goto err_del_dev; + } + + /* set properties */ + dev->opts = qdict_clone_shallow(opts); + qdict_del(dev->opts, "driver"); + qdict_del(dev->opts, "bus"); + qdict_del(dev->opts, "id"); + + object_set_properties_from_keyval(&dev->parent_obj, dev->opts, from_json, + errp); + if (*errp) { goto err_del_dev; } - dev->opts = opts; if (!qdev_realize(DEVICE(dev), bus, errp)) { - dev->opts = NULL; goto err_del_dev; } return dev; @@ -698,6 +721,19 @@ err_del_dev: return NULL; } +/* Takes ownership of @opts on success */ +DeviceState *qdev_device_add(QemuOpts *opts, Error **errp) +{ + QDict *qdict = qemu_opts_to_qdict(opts, NULL); + DeviceState *ret; + + ret = qdev_device_add_from_qdict(qdict, false, errp); + if (ret) { + qemu_opts_del(opts); + } + qobject_unref(qdict); + return ret; +} #define qdev_printf(fmt, ...) monitor_printf(mon, "%*s" fmt, indent, "", ## __VA_ARGS__) static void qbus_print(Monitor *mon, BusState *bus, int indent); @@ -835,18 +871,8 @@ void qmp_device_add(QDict *qdict, QObject **ret_data, Error **errp) static DeviceState *find_device_state(const char *id, Error **errp) { - Object *obj; - - if (id[0] == '/') { - obj = object_resolve_path(id, NULL); - } else { - char *root_path = object_get_canonical_path(qdev_get_peripheral()); - char *path = g_strdup_printf("%s/%s", root_path, id); - - g_free(root_path); - obj = object_resolve_path_type(path, TYPE_DEVICE, NULL); - g_free(path); - } + Object *obj = object_resolve_path_at(qdev_get_peripheral(), id); + DeviceState *dev; if (!obj) { error_set(errp, ERROR_CLASS_DEVICE_NOT_FOUND, @@ -854,12 +880,13 @@ static DeviceState *find_device_state(const char *id, Error **errp) return NULL; } - if (!object_dynamic_cast(obj, TYPE_DEVICE)) { + dev = (DeviceState *)object_dynamic_cast(obj, TYPE_DEVICE); + if (!dev) { error_setg(errp, "%s is not a hotpluggable device", id); return NULL; } - return DEVICE(obj); + return dev; } void qdev_unplug(DeviceState *dev, Error **errp) diff --git a/softmmu/trace-events b/softmmu/trace-events index 7b278590a0..9c88887b3c 100644 --- a/softmmu/trace-events +++ b/softmmu/trace-events @@ -15,9 +15,11 @@ memory_region_subpage_read(int cpu_index, void *mr, uint64_t offset, uint64_t va memory_region_subpage_write(int cpu_index, void *mr, uint64_t offset, uint64_t value, unsigned size) "cpu %d mr %p offset 0x%"PRIx64" value 0x%"PRIx64" size %u" memory_region_ram_device_read(int cpu_index, void *mr, uint64_t addr, uint64_t value, unsigned size) "cpu %d mr %p addr 0x%"PRIx64" value 0x%"PRIx64" size %u" memory_region_ram_device_write(int cpu_index, void *mr, uint64_t addr, uint64_t value, unsigned size) "cpu %d mr %p addr 0x%"PRIx64" value 0x%"PRIx64" size %u" +memory_region_sync_dirty(const char *mr, const char *listener, int global) "mr '%s' listener '%s' synced (global=%d)" flatview_new(void *view, void *root) "%p (root %p)" flatview_destroy(void *view, void *root) "%p (root %p)" flatview_destroy_rcu(void *view, void *root) "%p (root %p)" +global_dirty_changed(unsigned 
int bitmask) "bitmask 0x%"PRIx32 # softmmu.c vm_stop_flush_all(int ret) "ret %d" diff --git a/softmmu/vl.c b/softmmu/vl.c index 55ab70eb97..1159a64bce 100644 --- a/softmmu/vl.c +++ b/softmmu/vl.c @@ -144,6 +144,12 @@ typedef struct ObjectOption { QTAILQ_ENTRY(ObjectOption) next; } ObjectOption; +typedef struct DeviceOption { + QDict *opts; + Location loc; + QTAILQ_ENTRY(DeviceOption) next; +} DeviceOption; + static const char *cpu_option; static const char *mem_path; static const char *incoming; @@ -151,6 +157,7 @@ static const char *loadvm; static const char *accelerators; static QDict *machine_opts_dict; static QTAILQ_HEAD(, ObjectOption) object_opts = QTAILQ_HEAD_INITIALIZER(object_opts); +static QTAILQ_HEAD(, DeviceOption) device_opts = QTAILQ_HEAD_INITIALIZER(device_opts); static ram_addr_t maxram_size; static uint64_t ram_slots; static int display_remote; @@ -494,21 +501,39 @@ const char *qemu_get_vm_name(void) return qemu_name; } -static int default_driver_check(void *opaque, QemuOpts *opts, Error **errp) +static void default_driver_disable(const char *driver) { - const char *driver = qemu_opt_get(opts, "driver"); int i; - if (!driver) - return 0; + if (!driver) { + return; + } + for (i = 0; i < ARRAY_SIZE(default_list); i++) { if (strcmp(default_list[i].driver, driver) != 0) continue; *(default_list[i].flag) = 0; } +} + +static int default_driver_check(void *opaque, QemuOpts *opts, Error **errp) +{ + const char *driver = qemu_opt_get(opts, "driver"); + + default_driver_disable(driver); return 0; } +static void default_driver_check_json(void) +{ + DeviceOption *opt; + + QTAILQ_FOREACH(opt, &device_opts, next) { + const char *driver = qdict_get_try_str(opt->opts, "driver"); + default_driver_disable(driver); + } +} + static int parse_name(void *opaque, QemuOpts *opts, Error **errp) { const char *proc_name; @@ -1311,6 +1336,7 @@ static void qemu_disable_default_devices(void) { MachineClass *machine_class = MACHINE_GET_CLASS(current_machine); + default_driver_check_json(); qemu_opts_foreach(qemu_find_opts("device"), default_driver_check, NULL, NULL); qemu_opts_foreach(qemu_find_opts("global"), @@ -2637,6 +2663,8 @@ static void qemu_init_board(void) static void qemu_create_cli_devices(void) { + DeviceOption *opt; + soundhw_init(); qemu_opts_foreach(qemu_find_opts("fw_cfg"), @@ -2652,6 +2680,18 @@ static void qemu_create_cli_devices(void) rom_set_order_override(FW_CFG_ORDER_OVERRIDE_DEVICE); qemu_opts_foreach(qemu_find_opts("device"), device_init_func, NULL, &error_fatal); + QTAILQ_FOREACH(opt, &device_opts, next) { + loc_push_restore(&opt->loc); + /* + * TODO Eventually we should call qmp_device_add() here to make sure it + * behaves the same, but QMP still has to accept incorrectly typed + * options until libvirt is fixed and we want to be strict on the CLI + * from the start, so call qdev_device_add_from_qdict() directly for + * now. 
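
A minimal standalone sketch (not part of the patch) of the shape of a JSON -device argument handled by the DeviceOption queue above and the qemu_init() option parsing further below; only "driver", "id" and "bus" are keys the patch treats specially, and the remaining property is hypothetical:

# Sketch only: assembling a JSON -device argument.
import json

example = {
    "driver": "virtio-blk-pci",  # required, as with QemuOpts parsing
    "id": "disk0",               # registered via qdev_set_id()
    "bus": "pci.0",              # resolved via qbus_find()
    "drive": "drive0",           # hypothetical device property
}
print("-device '%s'" % json.dumps(example))
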
+ */ + qdev_device_add_from_qdict(opt->opts, true, &error_fatal); + loc_pop(&opt->loc); + } rom_reset_order_override(); } @@ -3216,6 +3256,7 @@ void qemu_init(int argc, char **argv, char **envp) error_report("only one watchdog option may be given"); exit(1); } + warn_report("-watchdog is deprecated; use -device instead."); watchdog = optarg; break; case QEMU_OPTION_action: @@ -3224,12 +3265,12 @@ void qemu_init(int argc, char **argv, char **envp) exit(1); } break; - case QEMU_OPTION_watchdog_action: - if (select_watchdog_action(optarg) == -1) { - error_report("unknown -watchdog-action parameter"); - exit(1); - } + case QEMU_OPTION_watchdog_action: { + QemuOpts *opts; + opts = qemu_opts_create(qemu_find_opts("action"), NULL, 0, &error_abort); + qemu_opt_set(opts, "watchdog", optarg, &error_abort); break; + } case QEMU_OPTION_parallel: add_device_config(DEV_PARALLEL, optarg); default_parallel = 0; @@ -3352,9 +3393,18 @@ void qemu_init(int argc, char **argv, char **envp) add_device_config(DEV_USB, optarg); break; case QEMU_OPTION_device: - if (!qemu_opts_parse_noisily(qemu_find_opts("device"), - optarg, true)) { - exit(1); + if (optarg[0] == '{') { + QObject *obj = qobject_from_json(optarg, &error_fatal); + DeviceOption *opt = g_new0(DeviceOption, 1); + opt->opts = qobject_to(QDict, obj); + loc_save(&opt->loc); + assert(opt->opts != NULL); + QTAILQ_INSERT_TAIL(&device_opts, opt, next); + } else { + if (!qemu_opts_parse_noisily(qemu_find_opts("device"), + optarg, true)) { + exit(1); + } } break; case QEMU_OPTION_smp: diff --git a/storage-daemon/qemu-storage-daemon.c b/storage-daemon/qemu-storage-daemon.c index fc8b150629..52cf17e8ac 100644 --- a/storage-daemon/qemu-storage-daemon.c +++ b/storage-daemon/qemu-storage-daemon.c @@ -98,10 +98,12 @@ static void help(void) " export the specified block node over NBD\n" " (requires --nbd-server)\n" "\n" +#ifdef CONFIG_FUSE " --export [type=]fuse,id=,node-name=,mountpoint=\n" " [,growable=on|off][,writable=on|off]\n" " export the specified block node over FUSE\n" "\n" +#endif /* CONFIG_FUSE */ " --monitor [chardev=]name[,mode=control][,pretty[=on|off]]\n" " configure a QMP monitor\n" "\n" @@ -144,7 +146,8 @@ static void init_qmp_commands(void) QTAILQ_INIT(&qmp_cap_negotiation_commands); qmp_register_command(&qmp_cap_negotiation_commands, "qmp_capabilities", - qmp_marshal_qmp_capabilities, QCO_ALLOW_PRECONFIG); + qmp_marshal_qmp_capabilities, + QCO_ALLOW_PRECONFIG, 0); } static int getopt_set_loc(int argc, char **argv, const char *optstring, diff --git a/stubs/meson.build b/stubs/meson.build index beee31ec73..71469c1d50 100644 --- a/stubs/meson.build +++ b/stubs/meson.build @@ -20,7 +20,9 @@ endif stub_ss.add(files('iothread-lock.c')) stub_ss.add(files('isa-bus.c')) stub_ss.add(files('is-daemonized.c')) -stub_ss.add(when: 'CONFIG_LINUX_AIO', if_true: files('linux-aio.c')) +if libaio.found() + stub_ss.add(files('linux-aio.c')) +endif stub_ss.add(files('migr-blocker.c')) stub_ss.add(files('module-opts.c')) stub_ss.add(files('monitor.c')) @@ -29,6 +31,7 @@ stub_ss.add(files('pci-bus.c')) stub_ss.add(files('qemu-timer-notify-cb.c')) stub_ss.add(files('qmp_memory_device.c')) stub_ss.add(files('qmp-command-available.c')) +stub_ss.add(files('qmp-quit.c')) stub_ss.add(files('qtest.c')) stub_ss.add(files('ram-block.c')) stub_ss.add(files('ramfb.c')) diff --git a/stubs/qdev.c b/stubs/qdev.c index 92e6143134..187659f707 100644 --- a/stubs/qdev.c +++ b/stubs/qdev.c @@ -21,3 +21,10 @@ void qapi_event_send_device_deleted(bool has_device, { /* Nothing to do. 
*/ } + +void qapi_event_send_device_unplug_guest_error(bool has_device, + const char *device, + const char *path) +{ + /* Nothing to do. */ +} diff --git a/stubs/qmp-quit.c b/stubs/qmp-quit.c new file mode 100644 index 0000000000..a3ff47f7bd --- /dev/null +++ b/stubs/qmp-quit.c @@ -0,0 +1,8 @@ +#include "qemu/osdep.h" +#include "qapi/qapi-commands-control.h" +#include "qapi/qmp/dispatch.h" + +void qmp_quit(Error **errp) +{ + g_assert_not_reached(); +} diff --git a/stubs/usb-dev-stub.c b/stubs/usb-dev-stub.c index b1adeeb454..aa557692b7 100644 --- a/stubs/usb-dev-stub.c +++ b/stubs/usb-dev-stub.c @@ -8,6 +8,8 @@ #include "qemu/osdep.h" #include "qemu/error-report.h" +#include "qapi/error.h" +#include "qapi/qapi-commands-machine.h" #include "sysemu/sysemu.h" #include "monitor/monitor.h" #include "hw/usb.h" @@ -19,6 +21,12 @@ USBDevice *usbdevice_create(const char *driver) return NULL; } +HumanReadableText *qmp_x_query_usb(Error **errp) +{ + error_setg(errp, "Support for USB devices not built-in"); + return NULL; +} + void hmp_info_usb(Monitor *mon, const QDict *qdict) { monitor_printf(mon, "Support for USB devices not built-in\n"); diff --git a/subprojects/libvhost-user/libvhost-user.c b/subprojects/libvhost-user/libvhost-user.c index bf09693255..787f4d2d4f 100644 --- a/subprojects/libvhost-user/libvhost-user.c +++ b/subprojects/libvhost-user/libvhost-user.c @@ -816,6 +816,7 @@ vu_rem_mem_reg(VuDev *dev, VhostUserMsg *vmsg) { shadow_regions[j].gpa = dev->regions[i].gpa; shadow_regions[j].size = dev->regions[i].size; shadow_regions[j].qva = dev->regions[i].qva; + shadow_regions[j].mmap_addr = dev->regions[i].mmap_addr; shadow_regions[j].mmap_offset = dev->regions[i].mmap_offset; j++; } else { diff --git a/target/alpha/cpu.c b/target/alpha/cpu.c index 93e16a2ffb..a8990d401b 100644 --- a/target/alpha/cpu.c +++ b/target/alpha/cpu.c @@ -218,9 +218,12 @@ static const struct SysemuCPUOps alpha_sysemu_ops = { static const struct TCGCPUOps alpha_tcg_ops = { .initialize = alpha_translate_init, - .tlb_fill = alpha_cpu_tlb_fill, -#ifndef CONFIG_USER_ONLY +#ifdef CONFIG_USER_ONLY + .record_sigsegv = alpha_cpu_record_sigsegv, + .record_sigbus = alpha_cpu_record_sigbus, +#else + .tlb_fill = alpha_cpu_tlb_fill, .cpu_exec_interrupt = alpha_cpu_exec_interrupt, .do_interrupt = alpha_cpu_do_interrupt, .do_transaction_failed = alpha_cpu_do_transaction_failed, diff --git a/target/alpha/cpu.h b/target/alpha/cpu.h index 772828cc26..afd975c878 100644 --- a/target/alpha/cpu.h +++ b/target/alpha/cpu.h @@ -282,9 +282,6 @@ void alpha_cpu_dump_state(CPUState *cs, FILE *f, int flags); hwaddr alpha_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr); int alpha_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg); int alpha_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg); -void alpha_cpu_do_unaligned_access(CPUState *cpu, vaddr addr, - MMUAccessType access_type, int mmu_idx, - uintptr_t retaddr) QEMU_NORETURN; #define cpu_list alpha_cpu_list @@ -439,9 +436,6 @@ void alpha_translate_init(void); #define CPU_RESOLVING_TYPE TYPE_ALPHA_CPU void alpha_cpu_list(void); -bool alpha_cpu_tlb_fill(CPUState *cs, vaddr address, int size, - MMUAccessType access_type, int mmu_idx, - bool probe, uintptr_t retaddr); void QEMU_NORETURN dynamic_excp(CPUAlphaState *, uintptr_t, int, int); void QEMU_NORETURN arith_excp(CPUAlphaState *, uintptr_t, int, uint64_t); @@ -449,7 +443,20 @@ uint64_t cpu_alpha_load_fpcr (CPUAlphaState *env); void cpu_alpha_store_fpcr (CPUAlphaState *env, uint64_t val); uint64_t 
cpu_alpha_load_gr(CPUAlphaState *env, unsigned reg); void cpu_alpha_store_gr(CPUAlphaState *env, unsigned reg, uint64_t val); -#ifndef CONFIG_USER_ONLY + +#ifdef CONFIG_USER_ONLY +void alpha_cpu_record_sigsegv(CPUState *cs, vaddr address, + MMUAccessType access_type, + bool maperr, uintptr_t retaddr); +void alpha_cpu_record_sigbus(CPUState *cs, vaddr address, + MMUAccessType access_type, uintptr_t retaddr); +#else +bool alpha_cpu_tlb_fill(CPUState *cs, vaddr address, int size, + MMUAccessType access_type, int mmu_idx, + bool probe, uintptr_t retaddr); +void alpha_cpu_do_unaligned_access(CPUState *cpu, vaddr addr, + MMUAccessType access_type, int mmu_idx, + uintptr_t retaddr) QEMU_NORETURN; void alpha_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr, vaddr addr, unsigned size, MMUAccessType access_type, diff --git a/target/alpha/helper.c b/target/alpha/helper.c index 81550d9e2f..b7e7f73b15 100644 --- a/target/alpha/helper.c +++ b/target/alpha/helper.c @@ -120,15 +120,44 @@ void cpu_alpha_store_gr(CPUAlphaState *env, unsigned reg, uint64_t val) } #if defined(CONFIG_USER_ONLY) -bool alpha_cpu_tlb_fill(CPUState *cs, vaddr address, int size, - MMUAccessType access_type, int mmu_idx, - bool probe, uintptr_t retaddr) +void alpha_cpu_record_sigsegv(CPUState *cs, vaddr address, + MMUAccessType access_type, + bool maperr, uintptr_t retaddr) { AlphaCPU *cpu = ALPHA_CPU(cs); + target_ulong mmcsr, cause; - cs->exception_index = EXCP_MMFAULT; + /* Assuming !maperr, infer the missing protection. */ + switch (access_type) { + case MMU_DATA_LOAD: + mmcsr = MM_K_FOR; + cause = 0; + break; + case MMU_DATA_STORE: + mmcsr = MM_K_FOW; + cause = 1; + break; + case MMU_INST_FETCH: + mmcsr = MM_K_FOE; + cause = -1; + break; + default: + g_assert_not_reached(); + } + if (maperr) { + if (address < BIT_ULL(TARGET_VIRT_ADDR_SPACE_BITS - 1)) { + /* Userspace address, therefore page not mapped. */ + mmcsr = MM_K_TNV; + } else { + /* Kernel or invalid address. */ + mmcsr = MM_K_ACV; + } + } + + /* Record the arguments that PALcode would give to the kernel. */ cpu->env.trap_arg0 = address; - cpu_loop_exit_restore(cs, retaddr); + cpu->env.trap_arg1 = mmcsr; + cpu->env.trap_arg2 = cause; } #else /* Returns the OSF/1 entMM failure indication, or -1 on success. 
*/ diff --git a/target/alpha/mem_helper.c b/target/alpha/mem_helper.c index 75e72bc337..47283a0612 100644 --- a/target/alpha/mem_helper.c +++ b/target/alpha/mem_helper.c @@ -23,18 +23,12 @@ #include "exec/exec-all.h" #include "exec/cpu_ldst.h" -/* Softmmu support */ -#ifndef CONFIG_USER_ONLY -void alpha_cpu_do_unaligned_access(CPUState *cs, vaddr addr, - MMUAccessType access_type, - int mmu_idx, uintptr_t retaddr) +static void do_unaligned_access(CPUAlphaState *env, vaddr addr, uintptr_t retaddr) { - AlphaCPU *cpu = ALPHA_CPU(cs); - CPUAlphaState *env = &cpu->env; uint64_t pc; uint32_t insn; - cpu_restore_state(cs, retaddr, true); + cpu_restore_state(env_cpu(env), retaddr, true); pc = env->pc; insn = cpu_ldl_code(env, pc); @@ -42,6 +36,26 @@ void alpha_cpu_do_unaligned_access(CPUState *cs, vaddr addr, env->trap_arg0 = addr; env->trap_arg1 = insn >> 26; /* opcode */ env->trap_arg2 = (insn >> 21) & 31; /* dest regno */ +} + +#ifdef CONFIG_USER_ONLY +void alpha_cpu_record_sigbus(CPUState *cs, vaddr addr, + MMUAccessType access_type, uintptr_t retaddr) +{ + AlphaCPU *cpu = ALPHA_CPU(cs); + CPUAlphaState *env = &cpu->env; + + do_unaligned_access(env, addr, retaddr); +} +#else +void alpha_cpu_do_unaligned_access(CPUState *cs, vaddr addr, + MMUAccessType access_type, + int mmu_idx, uintptr_t retaddr) +{ + AlphaCPU *cpu = ALPHA_CPU(cs); + CPUAlphaState *env = &cpu->env; + + do_unaligned_access(env, addr, retaddr); cs->exception_index = EXCP_UNALIGN; env->error_code = 0; cpu_loop_exit(cs); diff --git a/target/alpha/translate.c b/target/alpha/translate.c index b034206688..a4c3f43e72 100644 --- a/target/alpha/translate.c +++ b/target/alpha/translate.c @@ -267,51 +267,51 @@ static inline DisasJumpType gen_invalid(DisasContext *ctx) return gen_excp(ctx, EXCP_OPCDEC, 0); } -static inline void gen_qemu_ldf(TCGv t0, TCGv t1, int flags) +static void gen_ldf(DisasContext *ctx, TCGv dest, TCGv addr) { TCGv_i32 tmp32 = tcg_temp_new_i32(); - tcg_gen_qemu_ld_i32(tmp32, t1, flags, MO_LEUL); - gen_helper_memory_to_f(t0, tmp32); + tcg_gen_qemu_ld_i32(tmp32, addr, ctx->mem_idx, MO_LEUL); + gen_helper_memory_to_f(dest, tmp32); tcg_temp_free_i32(tmp32); } -static inline void gen_qemu_ldg(TCGv t0, TCGv t1, int flags) +static void gen_ldg(DisasContext *ctx, TCGv dest, TCGv addr) { TCGv tmp = tcg_temp_new(); - tcg_gen_qemu_ld_i64(tmp, t1, flags, MO_LEQ); - gen_helper_memory_to_g(t0, tmp); + tcg_gen_qemu_ld_i64(tmp, addr, ctx->mem_idx, MO_LEQ); + gen_helper_memory_to_g(dest, tmp); tcg_temp_free(tmp); } -static inline void gen_qemu_lds(TCGv t0, TCGv t1, int flags) +static void gen_lds(DisasContext *ctx, TCGv dest, TCGv addr) { TCGv_i32 tmp32 = tcg_temp_new_i32(); - tcg_gen_qemu_ld_i32(tmp32, t1, flags, MO_LEUL); - gen_helper_memory_to_s(t0, tmp32); + tcg_gen_qemu_ld_i32(tmp32, addr, ctx->mem_idx, MO_LEUL); + gen_helper_memory_to_s(dest, tmp32); tcg_temp_free_i32(tmp32); } -static inline void gen_qemu_ldl_l(TCGv t0, TCGv t1, int flags) +static void gen_ldt(DisasContext *ctx, TCGv dest, TCGv addr) { - tcg_gen_qemu_ld_i64(t0, t1, flags, MO_LESL); - tcg_gen_mov_i64(cpu_lock_addr, t1); - tcg_gen_mov_i64(cpu_lock_value, t0); + tcg_gen_qemu_ld_i64(dest, addr, ctx->mem_idx, MO_LEQ); } -static inline void gen_qemu_ldq_l(TCGv t0, TCGv t1, int flags) +static void gen_load_fp(DisasContext *ctx, int ra, int rb, int32_t disp16, + void (*func)(DisasContext *, TCGv, TCGv)) { - tcg_gen_qemu_ld_i64(t0, t1, flags, MO_LEQ); - tcg_gen_mov_i64(cpu_lock_addr, t1); - tcg_gen_mov_i64(cpu_lock_value, t0); + /* Loads to $f31 are prefetches, which 
we can treat as nops. */ + if (likely(ra != 31)) { + TCGv addr = tcg_temp_new(); + tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16); + func(ctx, cpu_fir[ra], addr); + tcg_temp_free(addr); + } } -static inline void gen_load_mem(DisasContext *ctx, - void (*tcg_gen_qemu_load)(TCGv t0, TCGv t1, - int flags), - int ra, int rb, int32_t disp16, bool fp, - bool clear) +static void gen_load_int(DisasContext *ctx, int ra, int rb, int32_t disp16, + MemOp op, bool clear, bool locked) { - TCGv tmp, addr, va; + TCGv addr, dest; /* LDQ_U with ra $31 is UNOP. Other various loads are forms of prefetches, which we can treat as nops. No worries about @@ -320,72 +320,75 @@ static inline void gen_load_mem(DisasContext *ctx, return; } - tmp = tcg_temp_new(); - addr = load_gpr(ctx, rb); - - if (disp16) { - tcg_gen_addi_i64(tmp, addr, disp16); - addr = tmp; - } + addr = tcg_temp_new(); + tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16); if (clear) { - tcg_gen_andi_i64(tmp, addr, ~0x7); - addr = tmp; + tcg_gen_andi_i64(addr, addr, ~0x7); } - va = (fp ? cpu_fir[ra] : ctx->ir[ra]); - tcg_gen_qemu_load(va, addr, ctx->mem_idx); + dest = ctx->ir[ra]; + tcg_gen_qemu_ld_i64(dest, addr, ctx->mem_idx, op); - tcg_temp_free(tmp); + if (locked) { + tcg_gen_mov_i64(cpu_lock_addr, addr); + tcg_gen_mov_i64(cpu_lock_value, dest); + } + tcg_temp_free(addr); } -static inline void gen_qemu_stf(TCGv t0, TCGv t1, int flags) +static void gen_stf(DisasContext *ctx, TCGv src, TCGv addr) { TCGv_i32 tmp32 = tcg_temp_new_i32(); - gen_helper_f_to_memory(tmp32, t0); - tcg_gen_qemu_st_i32(tmp32, t1, flags, MO_LEUL); + gen_helper_f_to_memory(tmp32, addr); + tcg_gen_qemu_st_i32(tmp32, addr, ctx->mem_idx, MO_LEUL); tcg_temp_free_i32(tmp32); } -static inline void gen_qemu_stg(TCGv t0, TCGv t1, int flags) +static void gen_stg(DisasContext *ctx, TCGv src, TCGv addr) { TCGv tmp = tcg_temp_new(); - gen_helper_g_to_memory(tmp, t0); - tcg_gen_qemu_st_i64(tmp, t1, flags, MO_LEQ); + gen_helper_g_to_memory(tmp, src); + tcg_gen_qemu_st_i64(tmp, addr, ctx->mem_idx, MO_LEQ); tcg_temp_free(tmp); } -static inline void gen_qemu_sts(TCGv t0, TCGv t1, int flags) +static void gen_sts(DisasContext *ctx, TCGv src, TCGv addr) { TCGv_i32 tmp32 = tcg_temp_new_i32(); - gen_helper_s_to_memory(tmp32, t0); - tcg_gen_qemu_st_i32(tmp32, t1, flags, MO_LEUL); + gen_helper_s_to_memory(tmp32, src); + tcg_gen_qemu_st_i32(tmp32, addr, ctx->mem_idx, MO_LEUL); tcg_temp_free_i32(tmp32); } -static inline void gen_store_mem(DisasContext *ctx, - void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1, - int flags), - int ra, int rb, int32_t disp16, bool fp, - bool clear) +static void gen_stt(DisasContext *ctx, TCGv src, TCGv addr) { - TCGv tmp, addr, va; + tcg_gen_qemu_st_i64(src, addr, ctx->mem_idx, MO_LEQ); +} - tmp = tcg_temp_new(); - addr = load_gpr(ctx, rb); +static void gen_store_fp(DisasContext *ctx, int ra, int rb, int32_t disp16, + void (*func)(DisasContext *, TCGv, TCGv)) +{ + TCGv addr = tcg_temp_new(); + tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16); + func(ctx, load_fpr(ctx, ra), addr); + tcg_temp_free(addr); +} - if (disp16) { - tcg_gen_addi_i64(tmp, addr, disp16); - addr = tmp; - } +static void gen_store_int(DisasContext *ctx, int ra, int rb, int32_t disp16, + MemOp op, bool clear) +{ + TCGv addr, src; + + addr = tcg_temp_new(); + tcg_gen_addi_i64(addr, load_gpr(ctx, rb), disp16); if (clear) { - tcg_gen_andi_i64(tmp, addr, ~0x7); - addr = tmp; + tcg_gen_andi_i64(addr, addr, ~0x7); } - va = (fp ? 
load_fpr(ctx, ra) : load_gpr(ctx, ra)); - tcg_gen_qemu_store(va, addr, ctx->mem_idx); + src = load_gpr(ctx, ra); + tcg_gen_qemu_st_i64(src, addr, ctx->mem_idx, op); - tcg_temp_free(tmp); + tcg_temp_free(addr); } static DisasJumpType gen_store_conditional(DisasContext *ctx, int ra, int rb, @@ -1480,30 +1483,30 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn) case 0x0A: /* LDBU */ REQUIRE_AMASK(BWX); - gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, disp16, 0, 0); + gen_load_int(ctx, ra, rb, disp16, MO_UB, 0, 0); break; case 0x0B: /* LDQ_U */ - gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 1); + gen_load_int(ctx, ra, rb, disp16, MO_LEQ, 1, 0); break; case 0x0C: /* LDWU */ REQUIRE_AMASK(BWX); - gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, disp16, 0, 0); + gen_load_int(ctx, ra, rb, disp16, MO_LEUW, 0, 0); break; case 0x0D: /* STW */ REQUIRE_AMASK(BWX); - gen_store_mem(ctx, &tcg_gen_qemu_st16, ra, rb, disp16, 0, 0); + gen_store_int(ctx, ra, rb, disp16, MO_LEUW, 0); break; case 0x0E: /* STB */ REQUIRE_AMASK(BWX); - gen_store_mem(ctx, &tcg_gen_qemu_st8, ra, rb, disp16, 0, 0); + gen_store_int(ctx, ra, rb, disp16, MO_UB, 0); break; case 0x0F: /* STQ_U */ - gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 1); + gen_store_int(ctx, ra, rb, disp16, MO_LEQ, 1); break; case 0x10: @@ -2458,11 +2461,15 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn) break; case 0x2: /* Longword physical access with lock (hw_ldl_l/p) */ - gen_qemu_ldl_l(va, addr, MMU_PHYS_IDX); + tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LESL); + tcg_gen_mov_i64(cpu_lock_addr, addr); + tcg_gen_mov_i64(cpu_lock_value, va); break; case 0x3: /* Quadword physical access with lock (hw_ldq_l/p) */ - gen_qemu_ldq_l(va, addr, MMU_PHYS_IDX); + tcg_gen_qemu_ld_i64(va, addr, MMU_PHYS_IDX, MO_LEQ); + tcg_gen_mov_i64(cpu_lock_addr, addr); + tcg_gen_mov_i64(cpu_lock_value, va); break; case 0x4: /* Longword virtual PTE fetch (hw_ldl/v) */ @@ -2776,66 +2783,66 @@ static DisasJumpType translate_one(DisasContext *ctx, uint32_t insn) case 0x20: /* LDF */ REQUIRE_FEN; - gen_load_mem(ctx, &gen_qemu_ldf, ra, rb, disp16, 1, 0); + gen_load_fp(ctx, ra, rb, disp16, gen_ldf); break; case 0x21: /* LDG */ REQUIRE_FEN; - gen_load_mem(ctx, &gen_qemu_ldg, ra, rb, disp16, 1, 0); + gen_load_fp(ctx, ra, rb, disp16, gen_ldg); break; case 0x22: /* LDS */ REQUIRE_FEN; - gen_load_mem(ctx, &gen_qemu_lds, ra, rb, disp16, 1, 0); + gen_load_fp(ctx, ra, rb, disp16, gen_lds); break; case 0x23: /* LDT */ REQUIRE_FEN; - gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 1, 0); + gen_load_fp(ctx, ra, rb, disp16, gen_ldt); break; case 0x24: /* STF */ REQUIRE_FEN; - gen_store_mem(ctx, &gen_qemu_stf, ra, rb, disp16, 1, 0); + gen_store_fp(ctx, ra, rb, disp16, gen_stf); break; case 0x25: /* STG */ REQUIRE_FEN; - gen_store_mem(ctx, &gen_qemu_stg, ra, rb, disp16, 1, 0); + gen_store_fp(ctx, ra, rb, disp16, gen_stg); break; case 0x26: /* STS */ REQUIRE_FEN; - gen_store_mem(ctx, &gen_qemu_sts, ra, rb, disp16, 1, 0); + gen_store_fp(ctx, ra, rb, disp16, gen_sts); break; case 0x27: /* STT */ REQUIRE_FEN; - gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 1, 0); + gen_store_fp(ctx, ra, rb, disp16, gen_stt); break; case 0x28: /* LDL */ - gen_load_mem(ctx, &tcg_gen_qemu_ld32s, ra, rb, disp16, 0, 0); + gen_load_int(ctx, ra, rb, disp16, MO_LESL, 0, 0); break; case 0x29: /* LDQ */ - gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 0); + gen_load_int(ctx, ra, rb, disp16, MO_LEQ, 0, 0); break; case 0x2A: /* LDL_L */ - 
gen_load_mem(ctx, &gen_qemu_ldl_l, ra, rb, disp16, 0, 0); + gen_load_int(ctx, ra, rb, disp16, MO_LESL, 0, 1); break; case 0x2B: /* LDQ_L */ - gen_load_mem(ctx, &gen_qemu_ldq_l, ra, rb, disp16, 0, 0); + gen_load_int(ctx, ra, rb, disp16, MO_LEQ, 0, 1); break; case 0x2C: /* STL */ - gen_store_mem(ctx, &tcg_gen_qemu_st32, ra, rb, disp16, 0, 0); + gen_store_int(ctx, ra, rb, disp16, MO_LEUL, 0); break; case 0x2D: /* STQ */ - gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 0); + gen_store_int(ctx, ra, rb, disp16, MO_LEQ, 0); break; case 0x2E: /* STL_C */ @@ -2998,17 +3005,10 @@ static void alpha_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu) tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next); /* FALLTHRU */ case DISAS_PC_UPDATED: - if (!ctx->base.singlestep_enabled) { - tcg_gen_lookup_and_goto_ptr(); - break; - } - /* FALLTHRU */ + tcg_gen_lookup_and_goto_ptr(); + break; case DISAS_PC_UPDATED_NOCHAIN: - if (ctx->base.singlestep_enabled) { - gen_excp_1(EXCP_DEBUG, 0); - } else { - tcg_gen_exit_tb(NULL, 0); - } + tcg_gen_exit_tb(NULL, 0); break; default: g_assert_not_reached(); diff --git a/target/arm/cpu.c b/target/arm/cpu.c index 641a8c2d3d..a211804fd3 100644 --- a/target/arm/cpu.c +++ b/target/arm/cpu.c @@ -2031,10 +2031,13 @@ static const struct SysemuCPUOps arm_sysemu_ops = { static const struct TCGCPUOps arm_tcg_ops = { .initialize = arm_translate_init, .synchronize_from_tb = arm_cpu_synchronize_from_tb, - .tlb_fill = arm_cpu_tlb_fill, .debug_excp_handler = arm_debug_excp_handler, -#if !defined(CONFIG_USER_ONLY) +#ifdef CONFIG_USER_ONLY + .record_sigsegv = arm_cpu_record_sigsegv, + .record_sigbus = arm_cpu_record_sigbus, +#else + .tlb_fill = arm_cpu_tlb_fill, .cpu_exec_interrupt = arm_cpu_exec_interrupt, .do_interrupt = arm_cpu_do_interrupt, .do_transaction_failed = arm_cpu_do_transaction_failed, diff --git a/target/arm/cpu_tcg.c b/target/arm/cpu_tcg.c index 0d5adccf1a..13d0e9b195 100644 --- a/target/arm/cpu_tcg.c +++ b/target/arm/cpu_tcg.c @@ -898,10 +898,13 @@ static void pxa270c5_initfn(Object *obj) static const struct TCGCPUOps arm_v7m_tcg_ops = { .initialize = arm_translate_init, .synchronize_from_tb = arm_cpu_synchronize_from_tb, - .tlb_fill = arm_cpu_tlb_fill, .debug_excp_handler = arm_debug_excp_handler, -#if !defined(CONFIG_USER_ONLY) +#ifdef CONFIG_USER_ONLY + .record_sigsegv = arm_cpu_record_sigsegv, + .record_sigbus = arm_cpu_record_sigbus, +#else + .tlb_fill = arm_cpu_tlb_fill, .cpu_exec_interrupt = arm_v7m_cpu_exec_interrupt, .do_interrupt = arm_v7m_cpu_do_interrupt, .do_transaction_failed = arm_cpu_do_transaction_failed, diff --git a/target/arm/gdbstub.c b/target/arm/gdbstub.c index 826601b341..134da0d0ae 100644 --- a/target/arm/gdbstub.c +++ b/target/arm/gdbstub.c @@ -19,6 +19,7 @@ */ #include "qemu/osdep.h" #include "cpu.h" +#include "internals.h" #include "exec/gdbstub.h" typedef struct RegisterSysregXmlParam { @@ -124,6 +125,133 @@ int arm_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n) return 0; } +static int vfp_gdb_get_reg(CPUARMState *env, GByteArray *buf, int reg) +{ + ARMCPU *cpu = env_archcpu(env); + int nregs = cpu_isar_feature(aa32_simd_r32, cpu) ? 32 : 16; + + /* VFP data registers are always little-endian. */ + if (reg < nregs) { + return gdb_get_reg64(buf, *aa32_vfp_dreg(env, reg)); + } + if (arm_feature(env, ARM_FEATURE_NEON)) { + /* Aliases for Q regs. 
*/ + nregs += 16; + if (reg < nregs) { + uint64_t *q = aa32_vfp_qreg(env, reg - 32); + return gdb_get_reg128(buf, q[0], q[1]); + } + } + switch (reg - nregs) { + case 0: + return gdb_get_reg32(buf, vfp_get_fpscr(env)); + } + return 0; +} + +static int vfp_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg) +{ + ARMCPU *cpu = env_archcpu(env); + int nregs = cpu_isar_feature(aa32_simd_r32, cpu) ? 32 : 16; + + if (reg < nregs) { + *aa32_vfp_dreg(env, reg) = ldq_le_p(buf); + return 8; + } + if (arm_feature(env, ARM_FEATURE_NEON)) { + nregs += 16; + if (reg < nregs) { + uint64_t *q = aa32_vfp_qreg(env, reg - 32); + q[0] = ldq_le_p(buf); + q[1] = ldq_le_p(buf + 8); + return 16; + } + } + switch (reg - nregs) { + case 0: + vfp_set_fpscr(env, ldl_p(buf)); + return 4; + } + return 0; +} + +static int vfp_gdb_get_sysreg(CPUARMState *env, GByteArray *buf, int reg) +{ + switch (reg) { + case 0: + return gdb_get_reg32(buf, env->vfp.xregs[ARM_VFP_FPSID]); + case 1: + return gdb_get_reg32(buf, env->vfp.xregs[ARM_VFP_FPEXC]); + } + return 0; +} + +static int vfp_gdb_set_sysreg(CPUARMState *env, uint8_t *buf, int reg) +{ + switch (reg) { + case 0: + env->vfp.xregs[ARM_VFP_FPSID] = ldl_p(buf); + return 4; + case 1: + env->vfp.xregs[ARM_VFP_FPEXC] = ldl_p(buf) & (1 << 30); + return 4; + } + return 0; +} + +static int mve_gdb_get_reg(CPUARMState *env, GByteArray *buf, int reg) +{ + switch (reg) { + case 0: + return gdb_get_reg32(buf, env->v7m.vpr); + default: + return 0; + } +} + +static int mve_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg) +{ + switch (reg) { + case 0: + env->v7m.vpr = ldl_p(buf); + return 4; + default: + return 0; + } +} + +/** + * arm_get/set_gdb_*: get/set a gdb register + * @env: the CPU state + * @buf: a buffer to copy to/from + * @reg: register number (offset from start of group) + * + * We return the number of bytes copied + */ + +static int arm_gdb_get_sysreg(CPUARMState *env, GByteArray *buf, int reg) +{ + ARMCPU *cpu = env_archcpu(env); + const ARMCPRegInfo *ri; + uint32_t key; + + key = cpu->dyn_sysreg_xml.data.cpregs.keys[reg]; + ri = get_arm_cp_reginfo(cpu->cp_regs, key); + if (ri) { + if (cpreg_field_is_64bit(ri)) { + return gdb_get_reg64(buf, (uint64_t)read_raw_cp_reg(env, ri)); + } else { + return gdb_get_reg32(buf, (uint32_t)read_raw_cp_reg(env, ri)); + } + } + return 0; +} + +static int arm_gdb_set_sysreg(CPUARMState *env, uint8_t *buf, int reg) +{ + return 0; +} + static void arm_gen_one_xml_sysreg_tag(GString *s, DynamicGDBXMLInfo *dyn_xml, ARMCPRegInfo *ri, uint32_t ri_key, int bitsize, int regnum) @@ -319,3 +447,54 @@ const char *arm_gdb_get_dynamic_xml(CPUState *cs, const char *xmlname) } return NULL; } + +void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu) +{ + CPUState *cs = CPU(cpu); + CPUARMState *env = &cpu->env; + + if (arm_feature(env, ARM_FEATURE_AARCH64)) { + /* + * The lower part of each SVE register aliases to the FPU + * registers so we don't need to include both. 
+ */ +#ifdef TARGET_AARCH64 + if (isar_feature_aa64_sve(&cpu->isar)) { + gdb_register_coprocessor(cs, arm_gdb_get_svereg, arm_gdb_set_svereg, + arm_gen_dynamic_svereg_xml(cs, cs->gdb_num_regs), + "sve-registers.xml", 0); + } else { + gdb_register_coprocessor(cs, aarch64_fpu_gdb_get_reg, + aarch64_fpu_gdb_set_reg, + 34, "aarch64-fpu.xml", 0); + } +#endif + } else { + if (arm_feature(env, ARM_FEATURE_NEON)) { + gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg, + 49, "arm-neon.xml", 0); + } else if (cpu_isar_feature(aa32_simd_r32, cpu)) { + gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg, + 33, "arm-vfp3.xml", 0); + } else if (cpu_isar_feature(aa32_vfp_simd, cpu)) { + gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg, + 17, "arm-vfp.xml", 0); + } + if (!arm_feature(env, ARM_FEATURE_M)) { + /* + * A and R profile have FP sysregs FPEXC and FPSID that we + * expose to gdb. + */ + gdb_register_coprocessor(cs, vfp_gdb_get_sysreg, vfp_gdb_set_sysreg, + 2, "arm-vfp-sysregs.xml", 0); + } + } + if (cpu_isar_feature(aa32_mve, cpu)) { + gdb_register_coprocessor(cs, mve_gdb_get_reg, mve_gdb_set_reg, + 1, "arm-m-profile-mve.xml", 0); + } + gdb_register_coprocessor(cs, arm_gdb_get_sysreg, arm_gdb_set_sysreg, + arm_gen_dynamic_sysreg_xml(cs, cs->gdb_num_regs), + "system-registers.xml", 0); + +} diff --git a/target/arm/gdbstub64.c b/target/arm/gdbstub64.c index 251539ef79..596878666d 100644 --- a/target/arm/gdbstub64.c +++ b/target/arm/gdbstub64.c @@ -17,7 +17,9 @@ * License along with this library; if not, see . */ #include "qemu/osdep.h" +#include "qemu/log.h" #include "cpu.h" +#include "internals.h" #include "exec/gdbstub.h" int aarch64_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n) @@ -69,3 +71,141 @@ int aarch64_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n) /* Unknown register. */ return 0; } + +int aarch64_fpu_gdb_get_reg(CPUARMState *env, GByteArray *buf, int reg) +{ + switch (reg) { + case 0 ... 31: + { + /* 128 bit FP register - quads are in LE order */ + uint64_t *q = aa64_vfp_qreg(env, reg); + return gdb_get_reg128(buf, q[1], q[0]); + } + case 32: + /* FPSR */ + return gdb_get_reg32(buf, vfp_get_fpsr(env)); + case 33: + /* FPCR */ + return gdb_get_reg32(buf, vfp_get_fpcr(env)); + default: + return 0; + } +} + +int aarch64_fpu_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg) +{ + switch (reg) { + case 0 ... 31: + /* 128 bit FP register */ + { + uint64_t *q = aa64_vfp_qreg(env, reg); + q[0] = ldq_le_p(buf); + q[1] = ldq_le_p(buf + 8); + return 16; + } + case 32: + /* FPSR */ + vfp_set_fpsr(env, ldl_p(buf)); + return 4; + case 33: + /* FPCR */ + vfp_set_fpcr(env, ldl_p(buf)); + return 4; + default: + return 0; + } +} + +int arm_gdb_get_svereg(CPUARMState *env, GByteArray *buf, int reg) +{ + ARMCPU *cpu = env_archcpu(env); + + switch (reg) { + /* The first 32 registers are the zregs */ + case 0 ... 31: + { + int vq, len = 0; + for (vq = 0; vq < cpu->sve_max_vq; vq++) { + len += gdb_get_reg128(buf, + env->vfp.zregs[reg].d[vq * 2 + 1], + env->vfp.zregs[reg].d[vq * 2]); + } + return len; + } + case 32: + return gdb_get_reg32(buf, vfp_get_fpsr(env)); + case 33: + return gdb_get_reg32(buf, vfp_get_fpcr(env)); + /* then 16 predicates and the ffr */ + case 34 ... 
50: + { + int preg = reg - 34; + int vq, len = 0; + for (vq = 0; vq < cpu->sve_max_vq; vq = vq + 4) { + len += gdb_get_reg64(buf, env->vfp.pregs[preg].p[vq / 4]); + } + return len; + } + case 51: + { + /* + * We report in Vector Granules (VG) which is 64bit in a Z reg + * while the ZCR works in Vector Quads (VQ) which is 128bit chunks. + */ + int vq = sve_zcr_len_for_el(env, arm_current_el(env)) + 1; + return gdb_get_reg64(buf, vq * 2); + } + default: + /* gdbstub asked for something out our range */ + qemu_log_mask(LOG_UNIMP, "%s: out of range register %d", __func__, reg); + break; + } + + return 0; +} + +int arm_gdb_set_svereg(CPUARMState *env, uint8_t *buf, int reg) +{ + ARMCPU *cpu = env_archcpu(env); + + /* The first 32 registers are the zregs */ + switch (reg) { + /* The first 32 registers are the zregs */ + case 0 ... 31: + { + int vq, len = 0; + uint64_t *p = (uint64_t *) buf; + for (vq = 0; vq < cpu->sve_max_vq; vq++) { + env->vfp.zregs[reg].d[vq * 2 + 1] = *p++; + env->vfp.zregs[reg].d[vq * 2] = *p++; + len += 16; + } + return len; + } + case 32: + vfp_set_fpsr(env, *(uint32_t *)buf); + return 4; + case 33: + vfp_set_fpcr(env, *(uint32_t *)buf); + return 4; + case 34 ... 50: + { + int preg = reg - 34; + int vq, len = 0; + uint64_t *p = (uint64_t *) buf; + for (vq = 0; vq < cpu->sve_max_vq; vq = vq + 4) { + env->vfp.pregs[preg].p[vq / 4] = *p++; + len += 8; + } + return len; + } + case 51: + /* cannot set vg via gdbstub */ + return 0; + default: + /* gdbstub asked for something out our range */ + break; + } + + return 0; +} diff --git a/target/arm/helper-a64.c b/target/arm/helper-a64.c index 19445b3c94..5ae2ecb0f3 100644 --- a/target/arm/helper-a64.c +++ b/target/arm/helper-a64.c @@ -32,7 +32,6 @@ #include "exec/cpu_ldst.h" #include "qemu/int128.h" #include "qemu/atomic128.h" -#include "tcg/tcg.h" #include "fpu/softfloat.h" #include <zlib.h> /* For crc32 */ @@ -513,37 +512,19 @@ uint64_t HELPER(paired_cmpxchg64_le)(CPUARMState *env, uint64_t addr, uintptr_t ra = GETPC(); uint64_t o0, o1; bool success; - -#ifdef CONFIG_USER_ONLY - /* ??? Enforce alignment. 
*/ - uint64_t *haddr = g2h(env_cpu(env), addr); - - set_helper_retaddr(ra); - o0 = ldq_le_p(haddr + 0); - o1 = ldq_le_p(haddr + 1); - oldv = int128_make128(o0, o1); - - success = int128_eq(oldv, cmpv); - if (success) { - stq_le_p(haddr + 0, int128_getlo(newv)); - stq_le_p(haddr + 1, int128_gethi(newv)); - } - clear_helper_retaddr(); -#else int mem_idx = cpu_mmu_index(env, false); - TCGMemOpIdx oi0 = make_memop_idx(MO_LEQ | MO_ALIGN_16, mem_idx); - TCGMemOpIdx oi1 = make_memop_idx(MO_LEQ, mem_idx); + MemOpIdx oi0 = make_memop_idx(MO_LEQ | MO_ALIGN_16, mem_idx); + MemOpIdx oi1 = make_memop_idx(MO_LEQ, mem_idx); - o0 = helper_le_ldq_mmu(env, addr + 0, oi0, ra); - o1 = helper_le_ldq_mmu(env, addr + 8, oi1, ra); + o0 = cpu_ldq_le_mmu(env, addr + 0, oi0, ra); + o1 = cpu_ldq_le_mmu(env, addr + 8, oi1, ra); oldv = int128_make128(o0, o1); success = int128_eq(oldv, cmpv); if (success) { - helper_le_stq_mmu(env, addr + 0, int128_getlo(newv), oi1, ra); - helper_le_stq_mmu(env, addr + 8, int128_gethi(newv), oi1, ra); + cpu_stq_le_mmu(env, addr + 0, int128_getlo(newv), oi1, ra); + cpu_stq_le_mmu(env, addr + 8, int128_gethi(newv), oi1, ra); } -#endif return !success; } @@ -555,12 +536,12 @@ uint64_t HELPER(paired_cmpxchg64_le_parallel)(CPUARMState *env, uint64_t addr, uintptr_t ra = GETPC(); bool success; int mem_idx; - TCGMemOpIdx oi; + MemOpIdx oi; assert(HAVE_CMPXCHG128); mem_idx = cpu_mmu_index(env, false); - oi = make_memop_idx(MO_LEQ | MO_ALIGN_16, mem_idx); + oi = make_memop_idx(MO_LE | MO_128 | MO_ALIGN, mem_idx); cmpv = int128_make128(env->exclusive_val, env->exclusive_high); newv = int128_make128(new_lo, new_hi); @@ -583,37 +564,19 @@ uint64_t HELPER(paired_cmpxchg64_be)(CPUARMState *env, uint64_t addr, uintptr_t ra = GETPC(); uint64_t o0, o1; bool success; - -#ifdef CONFIG_USER_ONLY - /* ??? Enforce alignment. 
*/ - uint64_t *haddr = g2h(env_cpu(env), addr); - - set_helper_retaddr(ra); - o1 = ldq_be_p(haddr + 0); - o0 = ldq_be_p(haddr + 1); - oldv = int128_make128(o0, o1); - - success = int128_eq(oldv, cmpv); - if (success) { - stq_be_p(haddr + 0, int128_gethi(newv)); - stq_be_p(haddr + 1, int128_getlo(newv)); - } - clear_helper_retaddr(); -#else int mem_idx = cpu_mmu_index(env, false); - TCGMemOpIdx oi0 = make_memop_idx(MO_BEQ | MO_ALIGN_16, mem_idx); - TCGMemOpIdx oi1 = make_memop_idx(MO_BEQ, mem_idx); + MemOpIdx oi0 = make_memop_idx(MO_BEQ | MO_ALIGN_16, mem_idx); + MemOpIdx oi1 = make_memop_idx(MO_BEQ, mem_idx); - o1 = helper_be_ldq_mmu(env, addr + 0, oi0, ra); - o0 = helper_be_ldq_mmu(env, addr + 8, oi1, ra); + o1 = cpu_ldq_be_mmu(env, addr + 0, oi0, ra); + o0 = cpu_ldq_be_mmu(env, addr + 8, oi1, ra); oldv = int128_make128(o0, o1); success = int128_eq(oldv, cmpv); if (success) { - helper_be_stq_mmu(env, addr + 0, int128_gethi(newv), oi1, ra); - helper_be_stq_mmu(env, addr + 8, int128_getlo(newv), oi1, ra); + cpu_stq_be_mmu(env, addr + 0, int128_gethi(newv), oi1, ra); + cpu_stq_be_mmu(env, addr + 8, int128_getlo(newv), oi1, ra); } -#endif return !success; } @@ -625,12 +588,12 @@ uint64_t HELPER(paired_cmpxchg64_be_parallel)(CPUARMState *env, uint64_t addr, uintptr_t ra = GETPC(); bool success; int mem_idx; - TCGMemOpIdx oi; + MemOpIdx oi; assert(HAVE_CMPXCHG128); mem_idx = cpu_mmu_index(env, false); - oi = make_memop_idx(MO_BEQ | MO_ALIGN_16, mem_idx); + oi = make_memop_idx(MO_BE | MO_128 | MO_ALIGN, mem_idx); /* * High and low need to be switched here because this is not actually a @@ -651,12 +614,12 @@ void HELPER(casp_le_parallel)(CPUARMState *env, uint32_t rs, uint64_t addr, Int128 oldv, cmpv, newv; uintptr_t ra = GETPC(); int mem_idx; - TCGMemOpIdx oi; + MemOpIdx oi; assert(HAVE_CMPXCHG128); mem_idx = cpu_mmu_index(env, false); - oi = make_memop_idx(MO_LEQ | MO_ALIGN_16, mem_idx); + oi = make_memop_idx(MO_LE | MO_128 | MO_ALIGN, mem_idx); cmpv = int128_make128(env->xregs[rs], env->xregs[rs + 1]); newv = int128_make128(new_lo, new_hi); @@ -672,12 +635,12 @@ void HELPER(casp_be_parallel)(CPUARMState *env, uint32_t rs, uint64_t addr, Int128 oldv, cmpv, newv; uintptr_t ra = GETPC(); int mem_idx; - TCGMemOpIdx oi; + MemOpIdx oi; assert(HAVE_CMPXCHG128); mem_idx = cpu_mmu_index(env, false); - oi = make_memop_idx(MO_LEQ | MO_ALIGN_16, mem_idx); + oi = make_memop_idx(MO_LE | MO_128 | MO_ALIGN, mem_idx); cmpv = int128_make128(env->xregs[rs + 1], env->xregs[rs]); newv = int128_make128(new_lo, new_hi); diff --git a/target/arm/helper.c b/target/arm/helper.c index 6274221447..9b317899a6 100644 --- a/target/arm/helper.c +++ b/target/arm/helper.c @@ -12,7 +12,6 @@ #include "trace.h" #include "cpu.h" #include "internals.h" -#include "exec/gdbstub.h" #include "exec/helper-proto.h" #include "qemu/host-utils.h" #include "qemu/main-loop.h" @@ -54,101 +53,6 @@ static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address, static void switch_mode(CPUARMState *env, int mode); static int aa64_va_parameter_tbi(uint64_t tcr, ARMMMUIdx mmu_idx); -static int vfp_gdb_get_reg(CPUARMState *env, GByteArray *buf, int reg) -{ - ARMCPU *cpu = env_archcpu(env); - int nregs = cpu_isar_feature(aa32_simd_r32, cpu) ? 32 : 16; - - /* VFP data registers are always little-endian. */ - if (reg < nregs) { - return gdb_get_reg64(buf, *aa32_vfp_dreg(env, reg)); - } - if (arm_feature(env, ARM_FEATURE_NEON)) { - /* Aliases for Q regs. 
*/ - nregs += 16; - if (reg < nregs) { - uint64_t *q = aa32_vfp_qreg(env, reg - 32); - return gdb_get_reg128(buf, q[0], q[1]); - } - } - switch (reg - nregs) { - case 0: return gdb_get_reg32(buf, env->vfp.xregs[ARM_VFP_FPSID]); break; - case 1: return gdb_get_reg32(buf, vfp_get_fpscr(env)); break; - case 2: return gdb_get_reg32(buf, env->vfp.xregs[ARM_VFP_FPEXC]); break; - } - return 0; -} - -static int vfp_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg) -{ - ARMCPU *cpu = env_archcpu(env); - int nregs = cpu_isar_feature(aa32_simd_r32, cpu) ? 32 : 16; - - if (reg < nregs) { - *aa32_vfp_dreg(env, reg) = ldq_le_p(buf); - return 8; - } - if (arm_feature(env, ARM_FEATURE_NEON)) { - nregs += 16; - if (reg < nregs) { - uint64_t *q = aa32_vfp_qreg(env, reg - 32); - q[0] = ldq_le_p(buf); - q[1] = ldq_le_p(buf + 8); - return 16; - } - } - switch (reg - nregs) { - case 0: env->vfp.xregs[ARM_VFP_FPSID] = ldl_p(buf); return 4; - case 1: vfp_set_fpscr(env, ldl_p(buf)); return 4; - case 2: env->vfp.xregs[ARM_VFP_FPEXC] = ldl_p(buf) & (1 << 30); return 4; - } - return 0; -} - -static int aarch64_fpu_gdb_get_reg(CPUARMState *env, GByteArray *buf, int reg) -{ - switch (reg) { - case 0 ... 31: - { - /* 128 bit FP register - quads are in LE order */ - uint64_t *q = aa64_vfp_qreg(env, reg); - return gdb_get_reg128(buf, q[1], q[0]); - } - case 32: - /* FPSR */ - return gdb_get_reg32(buf, vfp_get_fpsr(env)); - case 33: - /* FPCR */ - return gdb_get_reg32(buf,vfp_get_fpcr(env)); - default: - return 0; - } -} - -static int aarch64_fpu_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg) -{ - switch (reg) { - case 0 ... 31: - /* 128 bit FP register */ - { - uint64_t *q = aa64_vfp_qreg(env, reg); - q[0] = ldq_le_p(buf); - q[1] = ldq_le_p(buf + 8); - return 16; - } - case 32: - /* FPSR */ - vfp_set_fpsr(env, ldl_p(buf)); - return 4; - case 33: - /* FPCR */ - vfp_set_fpcr(env, ldl_p(buf)); - return 4; - default: - return 0; - } -} - static uint64_t raw_read(CPUARMState *env, const ARMCPRegInfo *ri) { assert(ri->fieldoffset); @@ -208,134 +112,6 @@ static void write_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri, } } -/** - * arm_get/set_gdb_*: get/set a gdb register - * @env: the CPU state - * @buf: a buffer to copy to/from - * @reg: register number (offset from start of group) - * - * We return the number of bytes copied - */ - -static int arm_gdb_get_sysreg(CPUARMState *env, GByteArray *buf, int reg) -{ - ARMCPU *cpu = env_archcpu(env); - const ARMCPRegInfo *ri; - uint32_t key; - - key = cpu->dyn_sysreg_xml.data.cpregs.keys[reg]; - ri = get_arm_cp_reginfo(cpu->cp_regs, key); - if (ri) { - if (cpreg_field_is_64bit(ri)) { - return gdb_get_reg64(buf, (uint64_t)read_raw_cp_reg(env, ri)); - } else { - return gdb_get_reg32(buf, (uint32_t)read_raw_cp_reg(env, ri)); - } - } - return 0; -} - -static int arm_gdb_set_sysreg(CPUARMState *env, uint8_t *buf, int reg) -{ - return 0; -} - -#ifdef TARGET_AARCH64 -static int arm_gdb_get_svereg(CPUARMState *env, GByteArray *buf, int reg) -{ - ARMCPU *cpu = env_archcpu(env); - - switch (reg) { - /* The first 32 registers are the zregs */ - case 0 ... 31: - { - int vq, len = 0; - for (vq = 0; vq < cpu->sve_max_vq; vq++) { - len += gdb_get_reg128(buf, - env->vfp.zregs[reg].d[vq * 2 + 1], - env->vfp.zregs[reg].d[vq * 2]); - } - return len; - } - case 32: - return gdb_get_reg32(buf, vfp_get_fpsr(env)); - case 33: - return gdb_get_reg32(buf, vfp_get_fpcr(env)); - /* then 16 predicates and the ffr */ - case 34 ... 
50: - { - int preg = reg - 34; - int vq, len = 0; - for (vq = 0; vq < cpu->sve_max_vq; vq = vq + 4) { - len += gdb_get_reg64(buf, env->vfp.pregs[preg].p[vq / 4]); - } - return len; - } - case 51: - { - /* - * We report in Vector Granules (VG) which is 64bit in a Z reg - * while the ZCR works in Vector Quads (VQ) which is 128bit chunks. - */ - int vq = sve_zcr_len_for_el(env, arm_current_el(env)) + 1; - return gdb_get_reg64(buf, vq * 2); - } - default: - /* gdbstub asked for something out our range */ - qemu_log_mask(LOG_UNIMP, "%s: out of range register %d", __func__, reg); - break; - } - - return 0; -} - -static int arm_gdb_set_svereg(CPUARMState *env, uint8_t *buf, int reg) -{ - ARMCPU *cpu = env_archcpu(env); - - /* The first 32 registers are the zregs */ - switch (reg) { - /* The first 32 registers are the zregs */ - case 0 ... 31: - { - int vq, len = 0; - uint64_t *p = (uint64_t *) buf; - for (vq = 0; vq < cpu->sve_max_vq; vq++) { - env->vfp.zregs[reg].d[vq * 2 + 1] = *p++; - env->vfp.zregs[reg].d[vq * 2] = *p++; - len += 16; - } - return len; - } - case 32: - vfp_set_fpsr(env, *(uint32_t *)buf); - return 4; - case 33: - vfp_set_fpcr(env, *(uint32_t *)buf); - return 4; - case 34 ... 50: - { - int preg = reg - 34; - int vq, len = 0; - uint64_t *p = (uint64_t *) buf; - for (vq = 0; vq < cpu->sve_max_vq; vq = vq + 4) { - env->vfp.pregs[preg].p[vq / 4] = *p++; - len += 8; - } - return len; - } - case 51: - /* cannot set vg via gdbstub */ - return 0; - default: - /* gdbstub asked for something out our range */ - break; - } - - return 0; -} -#endif /* TARGET_AARCH64 */ - static bool raw_accessors_invalid(const ARMCPRegInfo *ri) { /* Return true if the regdef would cause an assertion if you called @@ -8658,44 +8434,6 @@ void register_cp_regs_for_features(ARMCPU *cpu) #endif } -void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu) -{ - CPUState *cs = CPU(cpu); - CPUARMState *env = &cpu->env; - - if (arm_feature(env, ARM_FEATURE_AARCH64)) { - /* - * The lower part of each SVE register aliases to the FPU - * registers so we don't need to include both. - */ -#ifdef TARGET_AARCH64 - if (isar_feature_aa64_sve(&cpu->isar)) { - gdb_register_coprocessor(cs, arm_gdb_get_svereg, arm_gdb_set_svereg, - arm_gen_dynamic_svereg_xml(cs, cs->gdb_num_regs), - "sve-registers.xml", 0); - } else -#endif - { - gdb_register_coprocessor(cs, aarch64_fpu_gdb_get_reg, - aarch64_fpu_gdb_set_reg, - 34, "aarch64-fpu.xml", 0); - } - } else if (arm_feature(env, ARM_FEATURE_NEON)) { - gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg, - 51, "arm-neon.xml", 0); - } else if (cpu_isar_feature(aa32_simd_r32, cpu)) { - gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg, - 35, "arm-vfp3.xml", 0); - } else if (cpu_isar_feature(aa32_vfp_simd, cpu)) { - gdb_register_coprocessor(cs, vfp_gdb_get_reg, vfp_gdb_set_reg, - 19, "arm-vfp.xml", 0); - } - gdb_register_coprocessor(cs, arm_gdb_get_sysreg, arm_gdb_set_sysreg, - arm_gen_dynamic_sysreg_xml(cs, cs->gdb_num_regs), - "system-registers.xml", 0); - -} - /* Sort alphabetically by type name, except for "any". 
*/ static gint arm_cpu_list_compare(gconstpointer a, gconstpointer b) { diff --git a/target/arm/hvf/hvf.c b/target/arm/hvf/hvf.c index bff3e0cde7..0dc96560d3 100644 --- a/target/arm/hvf/hvf.c +++ b/target/arm/hvf/hvf.c @@ -1150,12 +1150,19 @@ int hvf_vcpu_exec(CPUState *cpu) uint32_t sas = (syndrome >> 22) & 3; uint32_t len = 1 << sas; uint32_t srt = (syndrome >> 16) & 0x1f; + uint32_t cm = (syndrome >> 8) & 0x1; uint64_t val = 0; trace_hvf_data_abort(env->pc, hvf_exit->exception.virtual_address, hvf_exit->exception.physical_address, isv, iswrite, s1ptw, len, srt); + if (cm) { + /* We don't cache MMIO regions */ + advance_pc = true; + break; + } + assert(isv); if (iswrite) { diff --git a/target/arm/internals.h b/target/arm/internals.h index 9fbb364968..89f7610ebc 100644 --- a/target/arm/internals.h +++ b/target/arm/internals.h @@ -544,9 +544,17 @@ static inline bool arm_extabort_type(MemTxResult result) return result != MEMTX_DECODE_ERROR; } +#ifdef CONFIG_USER_ONLY +void arm_cpu_record_sigsegv(CPUState *cpu, vaddr addr, + MMUAccessType access_type, + bool maperr, uintptr_t ra); +void arm_cpu_record_sigbus(CPUState *cpu, vaddr addr, + MMUAccessType access_type, uintptr_t ra); +#else bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size, MMUAccessType access_type, int mmu_idx, bool probe, uintptr_t retaddr); +#endif static inline int arm_to_core_mmu_idx(ARMMMUIdx mmu_idx) { @@ -1270,4 +1278,11 @@ static inline uint64_t pmu_counter_mask(CPUARMState *env) return (1 << 31) | ((1 << pmu_num_counters(env)) - 1); } +#ifdef TARGET_AARCH64 +int arm_gdb_get_svereg(CPUARMState *env, GByteArray *buf, int reg); +int arm_gdb_set_svereg(CPUARMState *env, uint8_t *buf, int reg); +int aarch64_fpu_gdb_get_reg(CPUARMState *env, GByteArray *buf, int reg); +int aarch64_fpu_gdb_set_reg(CPUARMState *env, uint8_t *buf, int reg); +#endif + #endif diff --git a/target/arm/kvm.c b/target/arm/kvm.c index 94b970bbf9..bbf1ce7ba3 100644 --- a/target/arm/kvm.c +++ b/target/arm/kvm.c @@ -335,6 +335,7 @@ static void kvm_arm_devlistener_del(MemoryListener *listener, } static MemoryListener devlistener = { + .name = "kvm-arm", .region_add = kvm_arm_devlistener_add, .region_del = kvm_arm_devlistener_del, }; diff --git a/target/arm/m_helper.c b/target/arm/m_helper.c index 47903b3dc3..2c9922dc29 100644 --- a/target/arm/m_helper.c +++ b/target/arm/m_helper.c @@ -1930,7 +1930,7 @@ static bool do_v7m_function_return(ARMCPU *cpu) { bool threadmode, spsel; - TCGMemOpIdx oi; + MemOpIdx oi; ARMMMUIdx mmu_idx; uint32_t *frame_sp_p; uint32_t frameptr; @@ -1947,9 +1947,9 @@ static bool do_v7m_function_return(ARMCPU *cpu) * do them as secure, so work out what MMU index that is. */ mmu_idx = arm_v7m_mmu_idx_for_secstate(env, true); - oi = make_memop_idx(MO_LE, arm_to_core_mmu_idx(mmu_idx)); - newpc = helper_le_ldul_mmu(env, frameptr, oi, 0); - newpsr = helper_le_ldul_mmu(env, frameptr + 4, oi, 0); + oi = make_memop_idx(MO_LEUL, arm_to_core_mmu_idx(mmu_idx)); + newpc = cpu_ldl_le_mmu(env, frameptr, oi, 0); + newpsr = cpu_ldl_le_mmu(env, frameptr + 4, oi, 0); /* Consistency checks on new IPSR */ newpsr_exc = newpsr & XPSR_EXCP; diff --git a/target/arm/mte_helper.c b/target/arm/mte_helper.c index 724175210b..e09b7e46a2 100644 --- a/target/arm/mte_helper.c +++ b/target/arm/mte_helper.c @@ -84,10 +84,8 @@ static uint8_t *allocation_tag_mem(CPUARMState *env, int ptr_mmu_idx, uintptr_t index; if (!(flags & (ptr_access == MMU_DATA_STORE ? 
PAGE_WRITE_ORG : PAGE_READ))) { - /* SIGSEGV */ - arm_cpu_tlb_fill(env_cpu(env), ptr, ptr_size, ptr_access, - ptr_mmu_idx, false, ra); - g_assert_not_reached(); + cpu_loop_exit_sigsegv(env_cpu(env), ptr, ptr_access, + !(flags & PAGE_VALID), ra); } /* Require both MAP_ANON and PROT_MTE for the page. */ diff --git a/target/arm/psci.c b/target/arm/psci.c index 6709e28013..b279c0b9a4 100644 --- a/target/arm/psci.c +++ b/target/arm/psci.c @@ -27,15 +27,13 @@ bool arm_is_psci_call(ARMCPU *cpu, int excp_type) { - /* Return true if the r0/x0 value indicates a PSCI call and - * the exception type matches the configured PSCI conduit. This is - * called before the SMC/HVC instruction is executed, to decide whether - * we should treat it as a PSCI call or with the architecturally + /* + * Return true if the exception type matches the configured PSCI conduit. + * This is called before the SMC/HVC instruction is executed, to decide + * whether we should treat it as a PSCI call or with the architecturally * defined behaviour for an SMC or HVC (which might be UNDEF or trap * to EL2 or to EL3). */ - CPUARMState *env = &cpu->env; - uint64_t param = is_a64(env) ? env->xregs[0] : env->regs[0]; switch (excp_type) { case EXCP_HVC: @@ -52,27 +50,7 @@ bool arm_is_psci_call(ARMCPU *cpu, int excp_type) return false; } - switch (param) { - case QEMU_PSCI_0_2_FN_PSCI_VERSION: - case QEMU_PSCI_0_2_FN_MIGRATE_INFO_TYPE: - case QEMU_PSCI_0_2_FN_AFFINITY_INFO: - case QEMU_PSCI_0_2_FN64_AFFINITY_INFO: - case QEMU_PSCI_0_2_FN_SYSTEM_RESET: - case QEMU_PSCI_0_2_FN_SYSTEM_OFF: - case QEMU_PSCI_0_1_FN_CPU_ON: - case QEMU_PSCI_0_2_FN_CPU_ON: - case QEMU_PSCI_0_2_FN64_CPU_ON: - case QEMU_PSCI_0_1_FN_CPU_OFF: - case QEMU_PSCI_0_2_FN_CPU_OFF: - case QEMU_PSCI_0_1_FN_CPU_SUSPEND: - case QEMU_PSCI_0_2_FN_CPU_SUSPEND: - case QEMU_PSCI_0_2_FN64_CPU_SUSPEND: - case QEMU_PSCI_0_1_FN_MIGRATE: - case QEMU_PSCI_0_2_FN_MIGRATE: - return true; - default: - return false; - } + return true; } void arm_handle_psci_call(ARMCPU *cpu) @@ -194,10 +172,9 @@ void arm_handle_psci_call(ARMCPU *cpu) break; case QEMU_PSCI_0_1_FN_MIGRATE: case QEMU_PSCI_0_2_FN_MIGRATE: + default: ret = QEMU_PSCI_RET_NOT_SUPPORTED; break; - default: - g_assert_not_reached(); } err: diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c index dab5f1d1cd..07be55b7e1 100644 --- a/target/arm/sve_helper.c +++ b/target/arm/sve_helper.c @@ -6118,7 +6118,7 @@ DO_LDN_2(4, dd, MO_64) * linux-user/ in its get_user/put_user macros. * * TODO: Construct some helpers, written in assembly, that interact with - * handle_cpu_signal to produce memory ops which can properly report errors + * host_signal_handler to produce memory ops which can properly report errors * without racing. 
*/ diff --git a/target/arm/tlb_helper.c b/target/arm/tlb_helper.c index 3107f9823e..12a934e924 100644 --- a/target/arm/tlb_helper.c +++ b/target/arm/tlb_helper.c @@ -147,28 +147,12 @@ void arm_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr, arm_deliver_fault(cpu, addr, access_type, mmu_idx, &fi); } -#endif /* !defined(CONFIG_USER_ONLY) */ - bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size, MMUAccessType access_type, int mmu_idx, bool probe, uintptr_t retaddr) { ARMCPU *cpu = ARM_CPU(cs); ARMMMUFaultInfo fi = {}; - -#ifdef CONFIG_USER_ONLY - int flags = page_get_flags(useronly_clean_ptr(address)); - if (flags & PAGE_VALID) { - fi.type = ARMFault_Permission; - } else { - fi.type = ARMFault_Translation; - } - fi.level = 3; - - /* now we have a real cpu fault */ - cpu_restore_state(cs, retaddr, true); - arm_deliver_fault(cpu, address, access_type, mmu_idx, &fi); -#else hwaddr phys_addr; target_ulong page_size; int prot, ret; @@ -210,5 +194,29 @@ bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size, cpu_restore_state(cs, retaddr, true); arm_deliver_fault(cpu, address, access_type, mmu_idx, &fi); } -#endif } +#else +void arm_cpu_record_sigsegv(CPUState *cs, vaddr addr, + MMUAccessType access_type, + bool maperr, uintptr_t ra) +{ + ARMMMUFaultInfo fi = { + .type = maperr ? ARMFault_Translation : ARMFault_Permission, + .level = 3, + }; + ARMCPU *cpu = ARM_CPU(cs); + + /* + * We report both ESR and FAR to signal handlers. + * For now, it's easiest to deliver the fault normally. + */ + cpu_restore_state(cs, ra, true); + arm_deliver_fault(cpu, addr, access_type, MMU_USER_IDX, &fi); +} + +void arm_cpu_record_sigbus(CPUState *cs, vaddr addr, + MMUAccessType access_type, uintptr_t ra) +{ + arm_cpu_do_unaligned_access(cs, addr, access_type, MMU_USER_IDX, ra); +} +#endif /* !defined(CONFIG_USER_ONLY) */ diff --git a/target/arm/translate-a32.h b/target/arm/translate-a32.h index 88f15df60e..17af8dc95a 100644 --- a/target/arm/translate-a32.h +++ b/target/arm/translate-a32.h @@ -70,6 +70,9 @@ static inline void store_cpu_offset(TCGv_i32 var, int offset) #define store_cpu_field(var, name) \ store_cpu_offset(var, offsetof(CPUARMState, name)) +#define store_cpu_field_constant(val, name) \ + tcg_gen_st_i32(tcg_constant_i32(val), cpu_env, offsetof(CPUARMState, name)) + /* Create a new temporary and set it to the value of a CPU register. 
*/ static inline TCGv_i32 load_reg(DisasContext *s, int reg) { diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c index ab6b346e35..cec672f229 100644 --- a/target/arm/translate-a64.c +++ b/target/arm/translate-a64.c @@ -404,8 +404,6 @@ static inline void gen_goto_tb(DisasContext *s, int n, uint64_t dest) gen_a64_set_pc_im(dest); if (s->ss_active) { gen_step_complete_exception(s); - } else if (s->base.singlestep_enabled) { - gen_exception_internal(EXCP_DEBUG); } else { tcg_gen_lookup_and_goto_ptr(); s->base.is_jmp = DISAS_NORETURN; @@ -1045,7 +1043,7 @@ static void read_vec_element(DisasContext *s, TCGv_i64 tcg_dest, int srcidx, int element, MemOp memop) { int vect_off = vec_reg_offset(s, srcidx, element, memop & MO_SIZE); - switch (memop) { + switch ((unsigned)memop) { case MO_8: tcg_gen_ld8u_i64(tcg_dest, cpu_env, vect_off); break; @@ -14879,7 +14877,7 @@ static void aarch64_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu) { DisasContext *dc = container_of(dcbase, DisasContext, base); - if (unlikely(dc->base.singlestep_enabled || dc->ss_active)) { + if (unlikely(dc->ss_active)) { /* Note that this means single stepping WFI doesn't halt the CPU. * For conditional branch insns this is harmless unreachable code as * gen_goto_tb() has already handled emitting the debug exception @@ -14891,11 +14889,7 @@ static void aarch64_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu) /* fall through */ case DISAS_EXIT: case DISAS_JUMP: - if (dc->base.singlestep_enabled) { - gen_exception_internal(EXCP_DEBUG); - } else { - gen_step_complete_exception(dc); - } + gen_step_complete_exception(dc); break; case DISAS_NORETURN: break; diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c index bc91a64171..76b5fe9f31 100644 --- a/target/arm/translate-sve.c +++ b/target/arm/translate-sve.c @@ -1943,20 +1943,20 @@ static void do_sat_addsub_32(TCGv_i64 reg, TCGv_i64 val, bool u, bool d) static void do_sat_addsub_64(TCGv_i64 reg, TCGv_i64 val, bool u, bool d) { TCGv_i64 t0 = tcg_temp_new_i64(); - TCGv_i64 t1 = tcg_temp_new_i64(); TCGv_i64 t2; if (u) { if (d) { tcg_gen_sub_i64(t0, reg, val); - tcg_gen_movi_i64(t1, 0); - tcg_gen_movcond_i64(TCG_COND_LTU, reg, reg, val, t1, t0); + t2 = tcg_constant_i64(0); + tcg_gen_movcond_i64(TCG_COND_LTU, reg, reg, val, t2, t0); } else { tcg_gen_add_i64(t0, reg, val); - tcg_gen_movi_i64(t1, -1); - tcg_gen_movcond_i64(TCG_COND_LTU, reg, t0, reg, t1, t0); + t2 = tcg_constant_i64(-1); + tcg_gen_movcond_i64(TCG_COND_LTU, reg, t0, reg, t2, t0); } } else { + TCGv_i64 t1 = tcg_temp_new_i64(); if (d) { /* Detect signed overflow for subtraction. */ tcg_gen_xor_i64(t0, reg, val); @@ -1966,7 +1966,7 @@ static void do_sat_addsub_64(TCGv_i64 reg, TCGv_i64 val, bool u, bool d) /* Bound the result. */ tcg_gen_movi_i64(reg, INT64_MIN); - t2 = tcg_const_i64(0); + t2 = tcg_constant_i64(0); tcg_gen_movcond_i64(TCG_COND_LT, reg, t0, t2, reg, t1); } else { /* Detect signed overflow for addition. */ @@ -1977,13 +1977,12 @@ static void do_sat_addsub_64(TCGv_i64 reg, TCGv_i64 val, bool u, bool d) /* Bound the result. */ tcg_gen_movi_i64(t1, INT64_MAX); - t2 = tcg_const_i64(0); + t2 = tcg_constant_i64(0); tcg_gen_movcond_i64(TCG_COND_LT, reg, t0, t2, t1, reg); } - tcg_temp_free_i64(t2); + tcg_temp_free_i64(t1); } tcg_temp_free_i64(t0); - tcg_temp_free_i64(t1); } /* Similarly with a vector and a scalar operand. 
*/ diff --git a/target/arm/translate.c b/target/arm/translate.c index f7086c66a5..98f5925928 100644 --- a/target/arm/translate.c +++ b/target/arm/translate.c @@ -341,7 +341,7 @@ static void gen_exception_internal(int excp) tcg_temp_free_i32(tcg_excp); } -static void gen_step_complete_exception(DisasContext *s) +static void gen_singlestep_exception(DisasContext *s) { /* We just completed step of an insn. Move from Active-not-pending * to Active-pending, and then also take the swstep exception. @@ -357,30 +357,6 @@ static void gen_step_complete_exception(DisasContext *s) s->base.is_jmp = DISAS_NORETURN; } -static void gen_singlestep_exception(DisasContext *s) -{ - /* Generate the right kind of exception for singlestep, which is - * either the architectural singlestep or EXCP_DEBUG for QEMU's - * gdb singlestepping. - */ - if (s->ss_active) { - gen_step_complete_exception(s); - } else { - gen_exception_internal(EXCP_DEBUG); - } -} - -static inline bool is_singlestepping(DisasContext *s) -{ - /* Return true if we are singlestepping either because of - * architectural singlestep or QEMU gdbstub singlestep. This does - * not include the command line '-singlestep' mode which is rather - * misnamed as it only means "one instruction per TB" and doesn't - * affect the code we generate. - */ - return s->base.singlestep_enabled || s->ss_active; -} - void clear_eci_state(DisasContext *s) { /* @@ -388,8 +364,7 @@ void clear_eci_state(DisasContext *s) * multiple insn executes. */ if (s->eci) { - TCGv_i32 tmp = tcg_const_i32(0); - store_cpu_field(tmp, condexec_bits); + store_cpu_field_constant(0, condexec_bits); s->eci = 0; } } @@ -413,13 +388,12 @@ static void gen_smul_dual(TCGv_i32 a, TCGv_i32 b) void gen_rev16(TCGv_i32 dest, TCGv_i32 var) { TCGv_i32 tmp = tcg_temp_new_i32(); - TCGv_i32 mask = tcg_const_i32(0x00ff00ff); + TCGv_i32 mask = tcg_constant_i32(0x00ff00ff); tcg_gen_shri_i32(tmp, var, 8); tcg_gen_and_i32(tmp, tmp, mask); tcg_gen_and_i32(var, var, mask); tcg_gen_shli_i32(var, var, 8); tcg_gen_or_i32(dest, var, tmp); - tcg_temp_free_i32(mask); tcg_temp_free_i32(tmp); } @@ -764,9 +738,8 @@ void gen_set_condexec(DisasContext *s) { if (s->condexec_mask) { uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1); - TCGv_i32 tmp = tcg_temp_new_i32(); - tcg_gen_movi_i32(tmp, val); - store_cpu_field(tmp, condexec_bits); + + store_cpu_field_constant(val, condexec_bits); } } @@ -837,7 +810,7 @@ static inline void gen_bx_excret_final_code(DisasContext *s) /* Is the new PC value in the magic range indicating exception return? */ tcg_gen_brcondi_i32(TCG_COND_GEU, cpu_R[15], min_magic, excret_label); /* No: end the TB as we would for a DISAS_JMP */ - if (is_singlestepping(s)) { + if (s->ss_active) { gen_singlestep_exception(s); } else { tcg_gen_exit_tb(NULL, 0); @@ -2606,7 +2579,7 @@ static void gen_goto_tb(DisasContext *s, int n, target_ulong dest) /* Jump, specifying which TB number to use if we gen_goto_tb() */ static inline void gen_jmp_tb(DisasContext *s, uint32_t dest, int tbno) { - if (unlikely(is_singlestepping(s))) { + if (unlikely(s->ss_active)) { /* An indirect jump so that we still trigger the debug exception. 
*/ gen_set_pc_im(s, dest); s->base.is_jmp = DISAS_JUMP; @@ -7873,10 +7846,9 @@ static bool op_smlad(DisasContext *s, arg_rrrr *a, bool m_swap, bool sub) t3 = tcg_temp_new_i32(); tcg_gen_sari_i32(t3, t1, 31); qf = load_cpu_field(QF); - one = tcg_const_i32(1); + one = tcg_constant_i32(1); tcg_gen_movcond_i32(TCG_COND_NE, qf, t2, t3, one, qf); store_cpu_field(qf, QF); - tcg_temp_free_i32(one); tcg_temp_free_i32(t3); tcg_temp_free_i32(t2); } @@ -8387,8 +8359,6 @@ static bool trans_BL(DisasContext *s, arg_i *a) static bool trans_BLX_i(DisasContext *s, arg_BLX_i *a) { - TCGv_i32 tmp; - /* * BLX would be useless on M-profile; the encoding space * is used for other insns from v8.1M onward, and UNDEFs before that. @@ -8402,8 +8372,7 @@ static bool trans_BLX_i(DisasContext *s, arg_BLX_i *a) return false; } tcg_gen_movi_i32(cpu_R[14], s->base.pc_next | s->thumb); - tmp = tcg_const_i32(!s->thumb); - store_cpu_field(tmp, thumb); + store_cpu_field_constant(!s->thumb, thumb); gen_jmp(s, (read_pc(s) & ~3) + a->imm); return true; } @@ -8702,7 +8671,6 @@ static bool trans_LCTP(DisasContext *s, arg_LCTP *a) * doesn't cache branch information, all we need to do is reset * FPSCR.LTPSIZE to 4. */ - TCGv_i32 ltpsize; if (!dc_isar_feature(aa32_lob, s) || !dc_isar_feature(aa32_mve, s)) { @@ -8713,8 +8681,7 @@ static bool trans_LCTP(DisasContext *s, arg_LCTP *a) return true; } - ltpsize = tcg_const_i32(4); - store_cpu_field(ltpsize, v7m.ltpsize); + store_cpu_field_constant(4, v7m.ltpsize); return true; } @@ -9459,7 +9426,7 @@ static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs) dc->page_start = dc->base.pc_first & TARGET_PAGE_MASK; /* If architectural single step active, limit to 1. */ - if (is_singlestepping(dc)) { + if (dc->ss_active) { dc->base.max_insns = 1; } @@ -9512,9 +9479,7 @@ static void arm_tr_tb_start(DisasContextBase *dcbase, CPUState *cpu) /* Reset the conditional execution bits immediately. This avoids complications trying to do it at the end of the block. */ if (dc->condexec_mask || dc->condexec_cond) { - TCGv_i32 tmp = tcg_temp_new_i32(); - tcg_gen_movi_i32(tmp, 0); - store_cpu_field(tmp, condexec_bits); + store_cpu_field_constant(0, condexec_bits); } } @@ -9794,7 +9759,7 @@ static void arm_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu) * insn codepath itself. */ gen_bx_excret_final_code(dc); - } else if (unlikely(is_singlestepping(dc))) { + } else if (unlikely(dc->ss_active)) { /* Unconditional and "condition passed" instruction codepath. 
*/ switch (dc->base.is_jmp) { case DISAS_SWI: @@ -9889,7 +9854,7 @@ static void arm_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu) /* "Condition failed" instruction codepath for the branch/trap insn */ gen_set_label(dc->condlabel); gen_set_condexec(dc); - if (unlikely(is_singlestepping(dc))) { + if (unlikely(dc->ss_active)) { gen_set_pc_im(dc, dc->base.pc_next); gen_singlestep_exception(dc); } else { diff --git a/target/avr/translate.c b/target/avr/translate.c index 438e7b13c1..af8a3e0f9c 100644 --- a/target/avr/translate.c +++ b/target/avr/translate.c @@ -1087,11 +1087,7 @@ static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest) tcg_gen_exit_tb(tb, n); } else { tcg_gen_movi_i32(cpu_pc, dest); - if (ctx->base.singlestep_enabled) { - gen_helper_debug(cpu_env); - } else { - tcg_gen_lookup_and_goto_ptr(); - } + tcg_gen_lookup_and_goto_ptr(); } ctx->base.is_jmp = DISAS_NORETURN; } @@ -3009,17 +3005,10 @@ static void avr_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs) tcg_gen_movi_tl(cpu_pc, ctx->npc); /* fall through */ case DISAS_LOOKUP: - if (!ctx->base.singlestep_enabled) { - tcg_gen_lookup_and_goto_ptr(); - break; - } - /* fall through */ + tcg_gen_lookup_and_goto_ptr(); + break; case DISAS_EXIT: - if (ctx->base.singlestep_enabled) { - gen_helper_debug(cpu_env); - } else { - tcg_gen_exit_tb(NULL, 0); - } + tcg_gen_exit_tb(NULL, 0); break; default: g_assert_not_reached(); diff --git a/target/cris/cpu.c b/target/cris/cpu.c index c2e7483f5b..ed6c781342 100644 --- a/target/cris/cpu.c +++ b/target/cris/cpu.c @@ -205,9 +205,9 @@ static const struct SysemuCPUOps cris_sysemu_ops = { static const struct TCGCPUOps crisv10_tcg_ops = { .initialize = cris_initialize_crisv10_tcg, - .tlb_fill = cris_cpu_tlb_fill, #ifndef CONFIG_USER_ONLY + .tlb_fill = cris_cpu_tlb_fill, .cpu_exec_interrupt = cris_cpu_exec_interrupt, .do_interrupt = crisv10_cpu_do_interrupt, #endif /* !CONFIG_USER_ONLY */ @@ -215,9 +215,9 @@ static const struct TCGCPUOps crisv10_tcg_ops = { static const struct TCGCPUOps crisv32_tcg_ops = { .initialize = cris_initialize_tcg, - .tlb_fill = cris_cpu_tlb_fill, #ifndef CONFIG_USER_ONLY + .tlb_fill = cris_cpu_tlb_fill, .cpu_exec_interrupt = cris_cpu_exec_interrupt, .do_interrupt = cris_cpu_do_interrupt, #endif /* !CONFIG_USER_ONLY */ diff --git a/target/cris/cpu.h b/target/cris/cpu.h index 6603565f83..b445b194ea 100644 --- a/target/cris/cpu.h +++ b/target/cris/cpu.h @@ -189,6 +189,10 @@ extern const VMStateDescription vmstate_cris_cpu; void cris_cpu_do_interrupt(CPUState *cpu); void crisv10_cpu_do_interrupt(CPUState *cpu); bool cris_cpu_exec_interrupt(CPUState *cpu, int int_req); + +bool cris_cpu_tlb_fill(CPUState *cs, vaddr address, int size, + MMUAccessType access_type, int mmu_idx, + bool probe, uintptr_t retaddr); #endif void cris_cpu_dump_state(CPUState *cs, FILE *f, int flags); @@ -251,10 +255,6 @@ static inline int cpu_mmu_index (CPUCRISState *env, bool ifetch) return !!(env->pregs[PR_CCS] & U_FLAG); } -bool cris_cpu_tlb_fill(CPUState *cs, vaddr address, int size, - MMUAccessType access_type, int mmu_idx, - bool probe, uintptr_t retaddr); - /* Support function regs. */ #define SFR_RW_GC_CFG 0][0 #define SFR_RW_MM_CFG env->pregs[PR_SRS]][0 diff --git a/target/cris/helper.c b/target/cris/helper.c index 36926faf32..a0d6ecdcd3 100644 --- a/target/cris/helper.c +++ b/target/cris/helper.c @@ -39,22 +39,6 @@ #define D_LOG(...) 
do { } while (0) #endif -#if defined(CONFIG_USER_ONLY) - -bool cris_cpu_tlb_fill(CPUState *cs, vaddr address, int size, - MMUAccessType access_type, int mmu_idx, - bool probe, uintptr_t retaddr) -{ - CRISCPU *cpu = CRIS_CPU(cs); - - cs->exception_index = 0xaa; - cpu->env.pregs[PR_EDA] = address; - cpu_loop_exit_restore(cs, retaddr); -} - -#else /* !CONFIG_USER_ONLY */ - - static void cris_shift_ccs(CPUCRISState *env) { uint32_t ccs; @@ -304,5 +288,3 @@ bool cris_cpu_exec_interrupt(CPUState *cs, int interrupt_request) return ret; } - -#endif /* !CONFIG_USER_ONLY */ diff --git a/target/cris/meson.build b/target/cris/meson.build index 67c3793c85..c1e326d950 100644 --- a/target/cris/meson.build +++ b/target/cris/meson.build @@ -2,13 +2,16 @@ cris_ss = ss.source_set() cris_ss.add(files( 'cpu.c', 'gdbstub.c', - 'helper.c', 'op_helper.c', 'translate.c', )) cris_softmmu_ss = ss.source_set() -cris_softmmu_ss.add(files('mmu.c', 'machine.c')) +cris_softmmu_ss.add(files( + 'helper.c', + 'machine.c', + 'mmu.c', +)) target_arch += {'cris': cris_ss} target_softmmu_arch += {'cris': cris_softmmu_ss} diff --git a/target/cris/translate.c b/target/cris/translate.c index a84b753349..59325b388a 100644 --- a/target/cris/translate.c +++ b/target/cris/translate.c @@ -3249,22 +3249,6 @@ static void cris_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu) } } - if (unlikely(dc->base.singlestep_enabled)) { - switch (is_jmp) { - case DISAS_TOO_MANY: - case DISAS_UPDATE_NEXT: - tcg_gen_movi_tl(env_pc, npc); - /* fall through */ - case DISAS_JUMP: - case DISAS_UPDATE: - t_gen_raise_exception(EXCP_DEBUG); - return; - default: - break; - } - g_assert_not_reached(); - } - switch (is_jmp) { case DISAS_TOO_MANY: gen_goto_tb(dc, 0, npc); diff --git a/target/hexagon/README b/target/hexagon/README index b0b2435070..372e24747c 100644 --- a/target/hexagon/README +++ b/target/hexagon/README @@ -1,9 +1,13 @@ Hexagon is Qualcomm's very long instruction word (VLIW) digital signal -processor(DSP). +processor (DSP). We also support Hexagon Vector eXtensions (HVX). HVX +is a wide vector coprocessor designed for high performance computer vision, +image processing, machine learning, and other workloads. The following versions of the Hexagon core are supported Scalar core: v67 https://developer.qualcomm.com/downloads/qualcomm-hexagon-v67-programmer-s-reference-manual + HVX extension: v66 + https://developer.qualcomm.com/downloads/qualcomm-hexagon-v66-hvx-programmer-s-reference-manual We presented an overview of the project at the 2019 KVM Forum. https://kvmforum2019.sched.com/event/Tmwc/qemu-hexagon-automatic-translation-of-the-isa-manual-pseudcode-to-tiny-code-instructions-of-a-vliw-architecture-niccolo-izzo-revng-taylor-simpson-qualcomm-innovation-center @@ -124,6 +128,71 @@ There are also cases where we brute force the TCG code generation. Instructions with multiple definitions are examples. These require special handling because qemu helpers can only return a single value. +For HVX vectors, the generator behaves slightly differently. The wide vectors +won't fit in a TCGv or TCGv_i64, so we use TCGv_ptr variables to pass the +address to helper functions. Here's an example for an HVX vector-add-word +instruction. 
+ static void generate_V6_vaddw( + CPUHexagonState *env, + DisasContext *ctx, + Insn *insn, + Packet *pkt) + { + const int VdN = insn->regno[0]; + const intptr_t VdV_off = + ctx_future_vreg_off(ctx, VdN, 1, true); + TCGv_ptr VdV = tcg_temp_local_new_ptr(); + tcg_gen_addi_ptr(VdV, cpu_env, VdV_off); + const int VuN = insn->regno[1]; + const intptr_t VuV_off = + vreg_src_off(ctx, VuN); + TCGv_ptr VuV = tcg_temp_local_new_ptr(); + const int VvN = insn->regno[2]; + const intptr_t VvV_off = + vreg_src_off(ctx, VvN); + TCGv_ptr VvV = tcg_temp_local_new_ptr(); + tcg_gen_addi_ptr(VuV, cpu_env, VuV_off); + tcg_gen_addi_ptr(VvV, cpu_env, VvV_off); + TCGv slot = tcg_constant_tl(insn->slot); + gen_helper_V6_vaddw(cpu_env, VdV, VuV, VvV, slot); + tcg_temp_free(slot); + gen_log_vreg_write(ctx, VdV_off, VdN, EXT_DFL, insn->slot, false); + ctx_log_vreg_write(ctx, VdN, EXT_DFL, false); + tcg_temp_free_ptr(VdV); + tcg_temp_free_ptr(VuV); + tcg_temp_free_ptr(VvV); + } + +Notice that we also generate a variable named <operand>_off for each operand of +the instruction. This makes it easy to override the instruction semantics with +functions from tcg-op-gvec.h. Here's the override for this instruction. + #define fGEN_TCG_V6_vaddw(SHORTCODE) \ + tcg_gen_gvec_add(MO_32, VdV_off, VuV_off, VvV_off, \ + sizeof(MMVector), sizeof(MMVector)) + +Finally, we notice that the override doesn't use the TCGv_ptr variables, so +we don't generate them when an override is present. Here is what we generate +when the override is present. + static void generate_V6_vaddw( + CPUHexagonState *env, + DisasContext *ctx, + Insn *insn, + Packet *pkt) + { + const int VdN = insn->regno[0]; + const intptr_t VdV_off = + ctx_future_vreg_off(ctx, VdN, 1, true); + const int VuN = insn->regno[1]; + const intptr_t VuV_off = + vreg_src_off(ctx, VuN); + const int VvN = insn->regno[2]; + const intptr_t VvV_off = + vreg_src_off(ctx, VvN); + fGEN_TCG_V6_vaddw({ fHIDE(int i;) fVFOREACH(32, i) { VdV.w[i] = VuV.w[i] + VvV.w[i] ; } }); + gen_log_vreg_write(ctx, VdV_off, VdN, EXT_DFL, insn->slot, false); + ctx_log_vreg_write(ctx, VdN, EXT_DFL, false); + } + In addition to instruction semantics, we use a generator to create the decode tree. This generation is also a two step process. The first step is to run target/hexagon/gen_dectree_import.c to produce @@ -140,6 +209,7 @@ runtime information for each thread and contains stuff like the GPR and predicate registers. macros.h +mmvec/macros.h The Hexagon arch lib relies heavily on macros for the instruction semantics. 
This is a great advantage for qemu because we can override them for different @@ -203,6 +273,15 @@ During runtime, the following fields in CPUHexagonState (see cpu.h) are used pred_written boolean indicating if predicate was written mem_log_stores record of the stores (indexed by slot) +For Hexagon Vector eXtensions (HVX), the following fields are used + VRegs Vector registers + future_VRegs Registers to be stored during packet commit + tmp_VRegs Temporary registers *not* stored during commit + VRegs_updated Mask of predicated vector writes + QRegs Q (vector predicate) registers + future_QRegs Registers to be stored during packet commit + QRegs_updated Mask of predicated vector writes + *** Debugging *** You can turn on a lot of debugging by changing the HEX_DEBUG macro to 1 in diff --git a/target/hexagon/attribs_def.h.inc b/target/hexagon/attribs_def.h.inc index 381550909d..dc890a557f 100644 --- a/target/hexagon/attribs_def.h.inc +++ b/target/hexagon/attribs_def.h.inc @@ -41,6 +41,27 @@ DEF_ATTRIB(STORE, "Stores to memory", "", "") DEF_ATTRIB(MEMLIKE, "Memory-like instruction", "", "") DEF_ATTRIB(MEMLIKE_PACKET_RULES, "follows Memory-like packet rules", "", "") +/* V6 Vector attributes */ +DEF_ATTRIB(CVI, "Executes on the HVX extension", "", "") + +DEF_ATTRIB(CVI_NEW, "New value memory instruction executes on HVX", "", "") +DEF_ATTRIB(CVI_VM, "Memory instruction executes on HVX", "", "") +DEF_ATTRIB(CVI_VP, "Permute instruction executes on HVX", "", "") +DEF_ATTRIB(CVI_VP_VS, "Double vector permute/shft insn executes on HVX", "", "") +DEF_ATTRIB(CVI_VX, "Multiply instruction executes on HVX", "", "") +DEF_ATTRIB(CVI_VX_DV, "Double vector multiply insn executes on HVX", "", "") +DEF_ATTRIB(CVI_VS, "Shift instruction executes on HVX", "", "") +DEF_ATTRIB(CVI_VS_VX, "Permute/shift and multiply insn executes on HVX", "", "") +DEF_ATTRIB(CVI_VA, "ALU instruction executes on HVX", "", "") +DEF_ATTRIB(CVI_VA_DV, "Double vector alu instruction executes on HVX", "", "") +DEF_ATTRIB(CVI_4SLOT, "Consumes all the vector execution resources", "", "") +DEF_ATTRIB(CVI_TMP, "Transient Memory Load not written to register", "", "") +DEF_ATTRIB(CVI_GATHER, "CVI Gather operation", "", "") +DEF_ATTRIB(CVI_SCATTER, "CVI Scatter operation", "", "") +DEF_ATTRIB(CVI_SCATTER_RELEASE, "CVI Store Release for scatter", "", "") +DEF_ATTRIB(CVI_TMP_DST, "CVI instruction that doesn't write a register", "", "") +DEF_ATTRIB(CVI_SLOT23, "Can execute in slot 2 or slot 3 (HVX)", "", "") + /* Change-of-flow attributes */ DEF_ATTRIB(JUMP, "Jump-type instruction", "", "") @@ -64,6 +85,7 @@ DEF_ATTRIB(IMPLICIT_WRITES_P1, "Writes Predicate 1", "", "UREG.P1") DEF_ATTRIB(IMPLICIT_WRITES_P2, "Writes Predicate 1", "", "UREG.P2") DEF_ATTRIB(IMPLICIT_WRITES_P3, "May write Predicate 3", "", "UREG.P3") DEF_ATTRIB(IMPLICIT_READS_PC, "Reads the PC register", "", "") +DEF_ATTRIB(IMPLICIT_WRITES_USR, "May write USR", "", "") DEF_ATTRIB(WRITES_PRED_REG, "Writes a predicate register", "", "") DEF_ATTRIB(CRSLOT23, "Can execute in slot 2 or slot 3 (CR)", "", "") @@ -86,6 +108,7 @@ DEF_ATTRIB(HWLOOP1_END, "Ends HW loop1", "", "") DEF_ATTRIB(DCZEROA, "dczeroa type", "", "") DEF_ATTRIB(ICFLUSHOP, "icflush op type", "", "") DEF_ATTRIB(DCFLUSHOP, "dcflush op type", "", "") +DEF_ATTRIB(L2FLUSHOP, "l2flush op type", "", "") DEF_ATTRIB(DCFETCH, "dcfetch type", "", "") DEF_ATTRIB(L2FETCH, "Instruction is l2fetch type", "", "") diff --git a/target/hexagon/cpu.c b/target/hexagon/cpu.c index 3338365c16..fa9bd702d6 100644 --- a/target/hexagon/cpu.c +++ 
b/target/hexagon/cpu.c @@ -59,7 +59,7 @@ const char * const hexagon_regnames[TOTAL_PER_THREAD_REGS] = { "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31", "sa0", "lc0", "sa1", "lc1", "p3_0", "c5", "m0", "m1", "usr", "pc", "ugp", "gp", "cs0", "cs1", "c14", "c15", - "c16", "c17", "c18", "c19", "pkt_cnt", "insn_cnt", "c22", "c23", + "c16", "c17", "c18", "c19", "pkt_cnt", "insn_cnt", "hvx_cnt", "c23", "c24", "c25", "c26", "c27", "c28", "c29", "c30", "c31", }; @@ -113,7 +113,66 @@ static void print_reg(FILE *f, CPUHexagonState *env, int regnum) hexagon_regnames[regnum], value); } -static void hexagon_dump(CPUHexagonState *env, FILE *f) +static void print_vreg(FILE *f, CPUHexagonState *env, int regnum, + bool skip_if_zero) +{ + if (skip_if_zero) { + bool nonzero_found = false; + for (int i = 0; i < MAX_VEC_SIZE_BYTES; i++) { + if (env->VRegs[regnum].ub[i] != 0) { + nonzero_found = true; + break; + } + } + if (!nonzero_found) { + return; + } + } + + qemu_fprintf(f, " v%d = ( ", regnum); + qemu_fprintf(f, "0x%02x", env->VRegs[regnum].ub[MAX_VEC_SIZE_BYTES - 1]); + for (int i = MAX_VEC_SIZE_BYTES - 2; i >= 0; i--) { + qemu_fprintf(f, ", 0x%02x", env->VRegs[regnum].ub[i]); + } + qemu_fprintf(f, " )\n"); +} + +void hexagon_debug_vreg(CPUHexagonState *env, int regnum) +{ + print_vreg(stdout, env, regnum, false); +} + +static void print_qreg(FILE *f, CPUHexagonState *env, int regnum, + bool skip_if_zero) +{ + if (skip_if_zero) { + bool nonzero_found = false; + for (int i = 0; i < MAX_VEC_SIZE_BYTES / 8; i++) { + if (env->QRegs[regnum].ub[i] != 0) { + nonzero_found = true; + break; + } + } + if (!nonzero_found) { + return; + } + } + + qemu_fprintf(f, " q%d = ( ", regnum); + qemu_fprintf(f, "0x%02x", + env->QRegs[regnum].ub[MAX_VEC_SIZE_BYTES / 8 - 1]); + for (int i = MAX_VEC_SIZE_BYTES / 8 - 2; i >= 0; i--) { + qemu_fprintf(f, ", 0x%02x", env->QRegs[regnum].ub[i]); + } + qemu_fprintf(f, " )\n"); +} + +void hexagon_debug_qreg(CPUHexagonState *env, int regnum) +{ + print_qreg(stdout, env, regnum, false); +} + +static void hexagon_dump(CPUHexagonState *env, FILE *f, int flags) { HexagonCPU *cpu = env_archcpu(env); @@ -159,6 +218,17 @@ static void hexagon_dump(CPUHexagonState *env, FILE *f) print_reg(f, env, HEX_REG_CS1); #endif qemu_fprintf(f, "}\n"); + + if (flags & CPU_DUMP_FPU) { + qemu_fprintf(f, "Vector Registers = {\n"); + for (int i = 0; i < NUM_VREGS; i++) { + print_vreg(f, env, i, true); + } + for (int i = 0; i < NUM_QREGS; i++) { + print_qreg(f, env, i, true); + } + qemu_fprintf(f, "}\n"); + } } static void hexagon_dump_state(CPUState *cs, FILE *f, int flags) @@ -166,12 +236,12 @@ static void hexagon_dump_state(CPUState *cs, FILE *f, int flags) HexagonCPU *cpu = HEXAGON_CPU(cs); CPUHexagonState *env = &cpu->env; - hexagon_dump(env, f); + hexagon_dump(env, f, flags); } void hexagon_debug(CPUHexagonState *env) { - hexagon_dump(env, stdout); + hexagon_dump(env, stdout, CPU_DUMP_FPU); } static void hexagon_cpu_set_pc(CPUState *cs, vaddr value) @@ -245,34 +315,11 @@ static void hexagon_cpu_init(Object *obj) qdev_property_add_static(DEVICE(obj), &hexagon_lldb_stack_adjust_property); } -static bool hexagon_tlb_fill(CPUState *cs, vaddr address, int size, - MMUAccessType access_type, int mmu_idx, - bool probe, uintptr_t retaddr) -{ -#ifdef CONFIG_USER_ONLY - switch (access_type) { - case MMU_INST_FETCH: - cs->exception_index = HEX_EXCP_FETCH_NO_UPAGE; - break; - case MMU_DATA_LOAD: - cs->exception_index = HEX_EXCP_PRIV_NO_UREAD; - break; - case MMU_DATA_STORE: - cs->exception_index = 
HEX_EXCP_PRIV_NO_UWRITE; - break; - } - cpu_loop_exit_restore(cs, retaddr); -#else -#error System mode not implemented for Hexagon -#endif -} - #include "hw/core/tcg-cpu-ops.h" static const struct TCGCPUOps hexagon_tcg_ops = { .initialize = hexagon_translate_init, .synchronize_from_tb = hexagon_cpu_synchronize_from_tb, - .tlb_fill = hexagon_tlb_fill, }; static void hexagon_cpu_class_init(ObjectClass *c, void *data) @@ -292,7 +339,7 @@ static void hexagon_cpu_class_init(ObjectClass *c, void *data) cc->set_pc = hexagon_cpu_set_pc; cc->gdb_read_register = hexagon_gdb_read_register; cc->gdb_write_register = hexagon_gdb_write_register; - cc->gdb_num_core_regs = TOTAL_PER_THREAD_REGS; + cc->gdb_num_core_regs = TOTAL_PER_THREAD_REGS + NUM_VREGS + NUM_QREGS; cc->gdb_stop_before_watchpoint = true; cc->disas_set_info = hexagon_cpu_disas_set_info; cc->tcg_ops = &hexagon_tcg_ops; diff --git a/target/hexagon/cpu.h b/target/hexagon/cpu.h index f7d043865b..de121d950f 100644 --- a/target/hexagon/cpu.h +++ b/target/hexagon/cpu.h @@ -26,6 +26,7 @@ typedef struct CPUHexagonState CPUHexagonState; #include "qemu-common.h" #include "exec/cpu-defs.h" #include "hex_regs.h" +#include "mmvec/mmvec.h" #define NUM_PREGS 4 #define TOTAL_PER_THREAD_REGS 64 @@ -34,6 +35,7 @@ typedef struct CPUHexagonState CPUHexagonState; #define STORES_MAX 2 #define REG_WRITES_MAX 32 #define PRED_WRITES_MAX 5 /* 4 insns + endloop */ +#define VSTORES_MAX 2 #define TYPE_HEXAGON_CPU "hexagon-cpu" @@ -52,6 +54,13 @@ typedef struct { uint64_t data64; } MemLog; +typedef struct { + target_ulong va; + int size; + DECLARE_BITMAP(mask, MAX_VEC_SIZE_BYTES) QEMU_ALIGNED(16); + MMVector data QEMU_ALIGNED(16); +} VStoreLog; + #define EXEC_STATUS_OK 0x0000 #define EXEC_STATUS_STOP 0x0002 #define EXEC_STATUS_REPLAY 0x0010 @@ -64,6 +73,9 @@ typedef struct { #define CLEAR_EXCEPTION (env->status &= (~EXEC_STATUS_EXCEPTION)) #define SET_EXCEPTION (env->status |= EXEC_STATUS_EXCEPTION) +/* Maximum number of vector temps in a packet */ +#define VECTOR_TEMPS_MAX 4 + struct CPUHexagonState { target_ulong gpr[TOTAL_PER_THREAD_REGS]; target_ulong pred[NUM_PREGS]; @@ -97,8 +109,27 @@ struct CPUHexagonState { target_ulong llsc_val; uint64_t llsc_val_i64; - target_ulong is_gather_store_insn; - target_ulong gather_issued; + MMVector VRegs[NUM_VREGS] QEMU_ALIGNED(16); + MMVector future_VRegs[VECTOR_TEMPS_MAX] QEMU_ALIGNED(16); + MMVector tmp_VRegs[VECTOR_TEMPS_MAX] QEMU_ALIGNED(16); + + VRegMask VRegs_updated; + + MMQReg QRegs[NUM_QREGS] QEMU_ALIGNED(16); + MMQReg future_QRegs[NUM_QREGS] QEMU_ALIGNED(16); + QRegMask QRegs_updated; + + /* Temporaries used within instructions */ + MMVectorPair VuuV QEMU_ALIGNED(16); + MMVectorPair VvvV QEMU_ALIGNED(16); + MMVectorPair VxxV QEMU_ALIGNED(16); + MMVector vtmp QEMU_ALIGNED(16); + MMQReg qtmp QEMU_ALIGNED(16); + + VStoreLog vstore[VSTORES_MAX]; + target_ulong vstore_pending[VSTORES_MAX]; + bool vtcm_pending; + VTCMStoreLog vtcm_log; }; #define HEXAGON_CPU_CLASS(klass) \ @@ -141,6 +172,15 @@ static inline void cpu_get_tb_cpu_state(CPUHexagonState *env, target_ulong *pc, #endif } +static inline int cpu_mmu_index(CPUHexagonState *env, bool ifetch) +{ +#ifdef CONFIG_USER_ONLY + return MMU_USER_IDX; +#else +#error System mode not supported on Hexagon yet +#endif +} + typedef struct CPUHexagonState CPUArchState; typedef HexagonCPU ArchCPU; diff --git a/target/hexagon/decode.c b/target/hexagon/decode.c index d424245598..6f0f27b4ba 100644 --- a/target/hexagon/decode.c +++ b/target/hexagon/decode.c @@ -22,6 +22,7 @@ #include 
"decode.h" #include "insn.h" #include "printinsn.h" +#include "mmvec/decode_ext_mmvec.h" #define fZXTN(N, M, VAL) ((VAL) & ((1LL << (N)) - 1)) @@ -46,6 +47,7 @@ enum { /* Name Num Table */ DEF_REGMAP(R_16, 16, 0, 1, 2, 3, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 22, 23) DEF_REGMAP(R__8, 8, 0, 2, 4, 6, 16, 18, 20, 22) +DEF_REGMAP(R_8, 8, 0, 1, 2, 3, 4, 5, 6, 7) #define DECODE_MAPPED_REG(OPNUM, NAME) \ insn->regno[OPNUM] = DECODE_REGISTER_##NAME[insn->regno[OPNUM]]; @@ -157,6 +159,9 @@ static void decode_ext_init(void) for (i = EXT_IDX_noext; i < EXT_IDX_noext_AFTER; i++) { ext_trees[i] = &dectree_table_DECODE_EXT_EXT_noext; } + for (i = EXT_IDX_mmvec; i < EXT_IDX_mmvec_AFTER; i++) { + ext_trees[i] = &dectree_table_DECODE_EXT_EXT_mmvec; + } } typedef struct { @@ -566,8 +571,12 @@ static void decode_remove_extenders(Packet *packet) static SlotMask get_valid_slots(const Packet *pkt, unsigned int slot) { - return find_iclass_slots(pkt->insn[slot].opcode, - pkt->insn[slot].iclass); + if (GET_ATTRIB(pkt->insn[slot].opcode, A_EXTENSION)) { + return mmvec_ext_decode_find_iclass_slots(pkt->insn[slot].opcode); + } else { + return find_iclass_slots(pkt->insn[slot].opcode, + pkt->insn[slot].iclass); + } } #define DECODE_NEW_TABLE(TAG, SIZE, WHATNOT) /* NOTHING */ @@ -728,6 +737,11 @@ decode_insns_tablewalk(Insn *insn, const DectreeTable *table, } decode_op(insn, opc, encoding); return 1; + } else if (table->table[i].type == DECTREE_EXTSPACE) { + /* + * For now, HVX will be the only coproc + */ + return decode_insns_tablewalk(insn, ext_trees[EXT_IDX_mmvec], encoding); } else { return 0; } @@ -874,6 +888,7 @@ int decode_packet(int max_words, const uint32_t *words, Packet *pkt, int words_read = 0; bool end_of_packet = false; int new_insns = 0; + int i; uint32_t encoding32; /* Initialize */ @@ -901,6 +916,11 @@ int decode_packet(int max_words, const uint32_t *words, Packet *pkt, return 0; } pkt->encod_pkt_size_in_bytes = words_read * 4; + pkt->pkt_has_hvx = false; + for (i = 0; i < num_insns; i++) { + pkt->pkt_has_hvx |= + GET_ATTRIB(pkt->insn[i].opcode, A_CVI); + } /* * Check for :endloop in the parse bits @@ -931,6 +951,10 @@ int decode_packet(int max_words, const uint32_t *words, Packet *pkt, decode_set_slot_number(pkt); decode_fill_newvalue_regno(pkt); + if (pkt->pkt_has_hvx) { + mmvec_ext_decode_checks(pkt, disas_only); + } + if (!disas_only) { decode_shuffle_for_execution(pkt); decode_split_cmpjump(pkt); diff --git a/target/hexagon/gen_dectree_import.c b/target/hexagon/gen_dectree_import.c index 5b7ecfc6b3..ee354677fd 100644 --- a/target/hexagon/gen_dectree_import.c +++ b/target/hexagon/gen_dectree_import.c @@ -40,6 +40,11 @@ const char * const opcode_names[] = { * Q6INSN(A2_add,"Rd32=add(Rs32,Rt32)",ATTRIBS(), * "Add 32-bit registers", * { RdV=RsV+RtV;}) + * HVX instructions have the following form + * EXTINSN(V6_vinsertwr, "Vx32.w=vinsert(Rt32)", + * ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VX,A_CVI_LATE), + * "Insert Word Scalar into Vector", + * VxV.uw[0] = RtV;) */ const char * const opcode_syntax[XX_LAST_OPCODE] = { #define Q6INSN(TAG, BEH, ATTRIBS, DESCR, SEM) \ @@ -105,6 +110,14 @@ static const char *get_opcode_enc(int opcode) static const char *get_opcode_enc_class(int opcode) { + const char *tmp = opcode_encodings[opcode].encoding; + if (tmp == NULL) { + const char *test = "V6_"; /* HVX */ + const char *name = opcode_names[opcode]; + if (strncmp(name, test, strlen(test)) == 0) { + return "EXT_mmvec"; + } + } return opcode_enc_class_names[opcode_encodings[opcode].enc_class]; } diff --git 
a/target/hexagon/gen_helper_funcs.py b/target/hexagon/gen_helper_funcs.py index 2b1c5d8e3e..a446c45384 100755 --- a/target/hexagon/gen_helper_funcs.py +++ b/target/hexagon/gen_helper_funcs.py @@ -48,12 +48,26 @@ def gen_helper_arg_pair(f,regtype,regid,regno): if regno >= 0 : f.write(", ") f.write("int64_t %s%sV" % (regtype,regid)) +def gen_helper_arg_ext(f,regtype,regid,regno): + if regno > 0 : f.write(", ") + f.write("void *%s%sV_void" % (regtype,regid)) + +def gen_helper_arg_ext_pair(f,regtype,regid,regno): + if regno > 0 : f.write(", ") + f.write("void *%s%sV_void" % (regtype,regid)) + def gen_helper_arg_opn(f,regtype,regid,i,tag): if (hex_common.is_pair(regid)): - gen_helper_arg_pair(f,regtype,regid,i) + if (hex_common.is_hvx_reg(regtype)): + gen_helper_arg_ext_pair(f,regtype,regid,i) + else: + gen_helper_arg_pair(f,regtype,regid,i) elif (hex_common.is_single(regid)): if hex_common.is_old_val(regtype, regid, tag): - gen_helper_arg(f,regtype,regid,i) + if (hex_common.is_hvx_reg(regtype)): + gen_helper_arg_ext(f,regtype,regid,i) + else: + gen_helper_arg(f,regtype,regid,i) elif hex_common.is_new_val(regtype, regid, tag): gen_helper_arg_new(f,regtype,regid,i) else: @@ -72,25 +86,67 @@ def gen_helper_dest_decl_pair(f,regtype,regid,regno,subfield=""): f.write(" int64_t %s%sV%s = 0;\n" % \ (regtype,regid,subfield)) +def gen_helper_dest_decl_ext(f,regtype,regid): + if (regtype == "Q"): + f.write(" /* %s%sV is *(MMQReg *)(%s%sV_void) */\n" % \ + (regtype,regid,regtype,regid)) + else: + f.write(" /* %s%sV is *(MMVector *)(%s%sV_void) */\n" % \ + (regtype,regid,regtype,regid)) + +def gen_helper_dest_decl_ext_pair(f,regtype,regid,regno): + f.write(" /* %s%sV is *(MMVectorPair *))%s%sV_void) */\n" % \ + (regtype,regid,regtype, regid)) + def gen_helper_dest_decl_opn(f,regtype,regid,i): if (hex_common.is_pair(regid)): - gen_helper_dest_decl_pair(f,regtype,regid,i) + if (hex_common.is_hvx_reg(regtype)): + gen_helper_dest_decl_ext_pair(f,regtype,regid, i) + else: + gen_helper_dest_decl_pair(f,regtype,regid,i) elif (hex_common.is_single(regid)): - gen_helper_dest_decl(f,regtype,regid,i) + if (hex_common.is_hvx_reg(regtype)): + gen_helper_dest_decl_ext(f,regtype,regid) + else: + gen_helper_dest_decl(f,regtype,regid,i) else: print("Bad register parse: ",regtype,regid,toss,numregs) +def gen_helper_src_var_ext(f,regtype,regid): + if (regtype == "Q"): + f.write(" /* %s%sV is *(MMQReg *)(%s%sV_void) */\n" % \ + (regtype,regid,regtype,regid)) + else: + f.write(" /* %s%sV is *(MMVector *)(%s%sV_void) */\n" % \ + (regtype,regid,regtype,regid)) + +def gen_helper_src_var_ext_pair(f,regtype,regid,regno): + f.write(" /* %s%sV%s is *(MMVectorPair *)(%s%sV%s_void) */\n" % \ + (regtype,regid,regno,regtype,regid,regno)) + def gen_helper_return(f,regtype,regid,regno): f.write(" return %s%sV;\n" % (regtype,regid)) def gen_helper_return_pair(f,regtype,regid,regno): f.write(" return %s%sV;\n" % (regtype,regid)) +def gen_helper_dst_write_ext(f,regtype,regid): + return + +def gen_helper_dst_write_ext_pair(f,regtype,regid): + return + def gen_helper_return_opn(f, regtype, regid, i): if (hex_common.is_pair(regid)): - gen_helper_return_pair(f,regtype,regid,i) + if (hex_common.is_hvx_reg(regtype)): + gen_helper_dst_write_ext_pair(f,regtype,regid) + else: + gen_helper_return_pair(f,regtype,regid,i) elif (hex_common.is_single(regid)): - gen_helper_return(f,regtype,regid,i) + if (hex_common.is_hvx_reg(regtype)): + gen_helper_dst_write_ext(f,regtype,regid) + else: + gen_helper_return(f,regtype,regid,i) else: print("Bad register 
parse: ",regtype,regid,toss,numregs) @@ -129,14 +185,20 @@ def gen_helper_function(f, tag, tagregs, tagimms): % (tag, tag)) else: ## The return type of the function is the type of the destination - ## register + ## register (if scalar) i=0 for regtype,regid,toss,numregs in regs: if (hex_common.is_written(regid)): if (hex_common.is_pair(regid)): - gen_helper_return_type_pair(f,regtype,regid,i) + if (hex_common.is_hvx_reg(regtype)): + continue + else: + gen_helper_return_type_pair(f,regtype,regid,i) elif (hex_common.is_single(regid)): - gen_helper_return_type(f,regtype,regid,i) + if (hex_common.is_hvx_reg(regtype)): + continue + else: + gen_helper_return_type(f,regtype,regid,i) else: print("Bad register parse: ",regtype,regid,toss,numregs) i += 1 @@ -145,16 +207,37 @@ def gen_helper_function(f, tag, tagregs, tagimms): f.write("void") f.write(" HELPER(%s)(CPUHexagonState *env" % tag) + ## Arguments include the vector destination operands i = 1 + for regtype,regid,toss,numregs in regs: + if (hex_common.is_written(regid)): + if (hex_common.is_pair(regid)): + if (hex_common.is_hvx_reg(regtype)): + gen_helper_arg_ext_pair(f,regtype,regid,i) + else: + continue + elif (hex_common.is_single(regid)): + if (hex_common.is_hvx_reg(regtype)): + gen_helper_arg_ext(f,regtype,regid,i) + else: + # This is the return value of the function + continue + else: + print("Bad register parse: ",regtype,regid,toss,numregs) + i += 1 ## Arguments to the helper function are the source regs and immediates for regtype,regid,toss,numregs in regs: if (hex_common.is_read(regid)): + if (hex_common.is_hvx_reg(regtype) and + hex_common.is_readwrite(regid)): + continue gen_helper_arg_opn(f,regtype,regid,i,tag) i += 1 for immlett,bits,immshift in imms: gen_helper_arg_imm(f,immlett) i += 1 + if hex_common.need_slot(tag): if i > 0: f.write(", ") f.write("uint32_t slot") @@ -173,6 +256,17 @@ def gen_helper_function(f, tag, tagregs, tagimms): gen_helper_dest_decl_opn(f,regtype,regid,i) i += 1 + for regtype,regid,toss,numregs in regs: + if (hex_common.is_read(regid)): + if (hex_common.is_pair(regid)): + if (hex_common.is_hvx_reg(regtype)): + gen_helper_src_var_ext_pair(f,regtype,regid,i) + elif (hex_common.is_single(regid)): + if (hex_common.is_hvx_reg(regtype)): + gen_helper_src_var_ext(f,regtype,regid) + else: + print("Bad register parse: ",regtype,regid,toss,numregs) + if 'A_FPOP' in hex_common.attribdict[tag]: f.write(' arch_fpop_start(env);\n'); @@ -192,11 +286,12 @@ def main(): hex_common.read_semantics_file(sys.argv[1]) hex_common.read_attribs_file(sys.argv[2]) hex_common.read_overrides_file(sys.argv[3]) + hex_common.read_overrides_file(sys.argv[4]) hex_common.calculate_attribs() tagregs = hex_common.get_tagregs() tagimms = hex_common.get_tagimms() - with open(sys.argv[4], 'w') as f: + with open(sys.argv[5], 'w') as f: for tag in hex_common.tags: ## Skip the priv instructions if ( "A_PRIV" in hex_common.attribdict[tag] ) : diff --git a/target/hexagon/gen_helper_protos.py b/target/hexagon/gen_helper_protos.py index ea41007ec9..3b4e993fd1 100755 --- a/target/hexagon/gen_helper_protos.py +++ b/target/hexagon/gen_helper_protos.py @@ -94,19 +94,33 @@ def gen_helper_prototype(f, tag, tagregs, tagimms): f.write('DEF_HELPER_%s(%s' % (def_helper_size, tag)) ## Generate the qemu DEF_HELPER type for each result + ## Iterate over this list twice + ## - Emit the scalar result + ## - Emit the vector result i=0 for regtype,regid,toss,numregs in regs: if (hex_common.is_written(regid)): - gen_def_helper_opn(f, tag, regtype, regid, toss, 
numregs, i) + if (not hex_common.is_hvx_reg(regtype)): + gen_def_helper_opn(f, tag, regtype, regid, toss, numregs, i) i += 1 ## Put the env between the outputs and inputs f.write(', env' ) i += 1 + # Second pass + for regtype,regid,toss,numregs in regs: + if (hex_common.is_written(regid)): + if (hex_common.is_hvx_reg(regtype)): + gen_def_helper_opn(f, tag, regtype, regid, toss, numregs, i) + i += 1 + ## Generate the qemu type for each input operand (regs and immediates) for regtype,regid,toss,numregs in regs: if (hex_common.is_read(regid)): + if (hex_common.is_hvx_reg(regtype) and + hex_common.is_readwrite(regid)): + continue gen_def_helper_opn(f, tag, regtype, regid, toss, numregs, i) i += 1 for immlett,bits,immshift in imms: @@ -121,11 +135,12 @@ def main(): hex_common.read_semantics_file(sys.argv[1]) hex_common.read_attribs_file(sys.argv[2]) hex_common.read_overrides_file(sys.argv[3]) + hex_common.read_overrides_file(sys.argv[4]) hex_common.calculate_attribs() tagregs = hex_common.get_tagregs() tagimms = hex_common.get_tagimms() - with open(sys.argv[4], 'w') as f: + with open(sys.argv[5], 'w') as f: for tag in hex_common.tags: ## Skip the priv instructions if ( "A_PRIV" in hex_common.attribdict[tag] ) : diff --git a/target/hexagon/gen_semantics.c b/target/hexagon/gen_semantics.c index c5fccecc9e..4a2bdd70e9 100644 --- a/target/hexagon/gen_semantics.c +++ b/target/hexagon/gen_semantics.c @@ -44,6 +44,11 @@ int main(int argc, char *argv[]) * Q6INSN(A2_add,"Rd32=add(Rs32,Rt32)",ATTRIBS(), * "Add 32-bit registers", * { RdV=RsV+RtV;}) + * HVX instructions have the following form + * EXTINSN(V6_vinsertwr, "Vx32.w=vinsert(Rt32)", + * ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VX), + * "Insert Word Scalar into Vector", + * VxV.uw[0] = RtV;) */ #define Q6INSN(TAG, BEH, ATTRIBS, DESCR, SEM) \ do { \ @@ -59,8 +64,23 @@ int main(int argc, char *argv[]) ")\n", \ #TAG, STRINGIZE(ATTRIBS)); \ } while (0); +#define EXTINSN(TAG, BEH, ATTRIBS, DESCR, SEM) \ + do { \ + fprintf(outfile, "SEMANTICS( \\\n" \ + " \"%s\", \\\n" \ + " %s, \\\n" \ + " \"\"\"%s\"\"\" \\\n" \ + ")\n", \ + #TAG, STRINGIZE(BEH), STRINGIZE(SEM)); \ + fprintf(outfile, "ATTRIBUTES( \\\n" \ + " \"%s\", \\\n" \ + " \"%s\" \\\n" \ + ")\n", \ + #TAG, STRINGIZE(ATTRIBS)); \ + } while (0); #include "imported/allidefs.def" #undef Q6INSN +#undef EXTINSN /* * Process the macro definitions @@ -81,6 +101,19 @@ int main(int argc, char *argv[]) ")\n", \ #MNAME, STRINGIZE(BEH), STRINGIZE(ATTRS)); #include "imported/macros.def" +#undef DEF_MACRO + +/* + * Process the macros for HVX + */ +#define DEF_MACRO(MNAME, BEH, ATTRS) \ + fprintf(outfile, "MACROATTRIB( \\\n" \ + " \"%s\", \\\n" \ + " \"\"\"%s\"\"\", \\\n" \ + " \"%s\" \\\n" \ + ")\n", \ + #MNAME, STRINGIZE(BEH), STRINGIZE(ATTRS)); +#include "imported/allext_macros.def" #undef DEF_MACRO fclose(outfile); diff --git a/target/hexagon/gen_tcg.h b/target/hexagon/gen_tcg.h index ee94c903db..c6f0879b6e 100644 --- a/target/hexagon/gen_tcg.h +++ b/target/hexagon/gen_tcg.h @@ -66,11 +66,10 @@ } while (0) #define GET_EA_pci \ do { \ - TCGv tcgv_siV = tcg_const_tl(siV); \ + TCGv tcgv_siV = tcg_constant_tl(siV); \ tcg_gen_mov_tl(EA, RxV); \ gen_helper_fcircadd(RxV, RxV, tcgv_siV, MuV, \ hex_gpr[HEX_REG_CS0 + MuN]); \ - tcg_temp_free(tcgv_siV); \ } while (0) #define GET_EA_pcr(SHIFT) \ do { \ @@ -557,7 +556,7 @@ #define fGEN_TCG_A4_addp_c(SHORTCODE) \ do { \ TCGv_i64 carry = tcg_temp_new_i64(); \ - TCGv_i64 zero = tcg_const_i64(0); \ + TCGv_i64 zero = tcg_constant_i64(0); \ tcg_gen_extu_i32_i64(carry, PxV); \ 
tcg_gen_andi_i64(carry, carry, 1); \ tcg_gen_add2_i64(RddV, carry, RssV, zero, carry, zero); \ @@ -565,14 +564,13 @@ tcg_gen_extrl_i64_i32(PxV, carry); \ gen_8bitsof(PxV, PxV); \ tcg_temp_free_i64(carry); \ - tcg_temp_free_i64(zero); \ } while (0) /* r5:4 = sub(r1:0, r3:2, p1):carry */ #define fGEN_TCG_A4_subp_c(SHORTCODE) \ do { \ TCGv_i64 carry = tcg_temp_new_i64(); \ - TCGv_i64 zero = tcg_const_i64(0); \ + TCGv_i64 zero = tcg_constant_i64(0); \ TCGv_i64 not_RttV = tcg_temp_new_i64(); \ tcg_gen_extu_i32_i64(carry, PxV); \ tcg_gen_andi_i64(carry, carry, 1); \ @@ -582,7 +580,6 @@ tcg_gen_extrl_i64_i32(PxV, carry); \ gen_8bitsof(PxV, PxV); \ tcg_temp_free_i64(carry); \ - tcg_temp_free_i64(zero); \ tcg_temp_free_i64(not_RttV); \ } while (0) @@ -684,9 +681,8 @@ gen_helper_sfmin(RdV, cpu_env, RsV, RtV) #define fGEN_TCG_F2_sfclass(SHORTCODE) \ do { \ - TCGv imm = tcg_const_tl(uiV); \ + TCGv imm = tcg_constant_tl(uiV); \ gen_helper_sfclass(PdV, cpu_env, RsV, imm); \ - tcg_temp_free(imm); \ } while (0) #define fGEN_TCG_F2_sffixupn(SHORTCODE) \ gen_helper_sffixupn(RdV, cpu_env, RsV, RtV) @@ -712,9 +708,8 @@ gen_helper_dfcmpuo(PdV, cpu_env, RssV, RttV) #define fGEN_TCG_F2_dfclass(SHORTCODE) \ do { \ - TCGv imm = tcg_const_tl(uiV); \ + TCGv imm = tcg_constant_tl(uiV); \ gen_helper_dfclass(PdV, cpu_env, RssV, imm); \ - tcg_temp_free(imm); \ } while (0) #define fGEN_TCG_F2_sfmpy(SHORTCODE) \ gen_helper_sfmpy(RdV, cpu_env, RsV, RtV) diff --git a/target/hexagon/gen_tcg_funcs.py b/target/hexagon/gen_tcg_funcs.py index 7ceb25b5f6..1fd9de95d5 100755 --- a/target/hexagon/gen_tcg_funcs.py +++ b/target/hexagon/gen_tcg_funcs.py @@ -119,10 +119,95 @@ def genptr_decl(f, tag, regtype, regid, regno): (regtype, regid, regtype, regid)) else: print("Bad register parse: ", regtype, regid) + elif (regtype == "V"): + if (regid in {"dd"}): + f.write(" const int %s%sN = insn->regno[%d];\n" %\ + (regtype, regid, regno)) + f.write(" const intptr_t %s%sV_off =\n" %\ + (regtype, regid)) + if (hex_common.is_tmp_result(tag)): + f.write(" ctx_tmp_vreg_off(ctx, %s%sN, 2, true);\n" % \ + (regtype, regid)) + else: + f.write(" ctx_future_vreg_off(ctx, %s%sN," % \ + (regtype, regid)) + f.write(" 2, true);\n") + if (not hex_common.skip_qemu_helper(tag)): + f.write(" TCGv_ptr %s%sV = tcg_temp_new_ptr();\n" % \ + (regtype, regid)) + f.write(" tcg_gen_addi_ptr(%s%sV, cpu_env, %s%sV_off);\n" % \ + (regtype, regid, regtype, regid)) + elif (regid in {"uu", "vv", "xx"}): + f.write(" const int %s%sN = insn->regno[%d];\n" % \ + (regtype, regid, regno)) + f.write(" const intptr_t %s%sV_off =\n" % \ + (regtype, regid)) + f.write(" offsetof(CPUHexagonState, %s%sV);\n" % \ + (regtype, regid)) + if (not hex_common.skip_qemu_helper(tag)): + f.write(" TCGv_ptr %s%sV = tcg_temp_new_ptr();\n" % \ + (regtype, regid)) + f.write(" tcg_gen_addi_ptr(%s%sV, cpu_env, %s%sV_off);\n" % \ + (regtype, regid, regtype, regid)) + elif (regid in {"s", "u", "v", "w"}): + f.write(" const int %s%sN = insn->regno[%d];\n" % \ + (regtype, regid, regno)) + f.write(" const intptr_t %s%sV_off =\n" % \ + (regtype, regid)) + f.write(" vreg_src_off(ctx, %s%sN);\n" % \ + (regtype, regid)) + if (not hex_common.skip_qemu_helper(tag)): + f.write(" TCGv_ptr %s%sV = tcg_temp_new_ptr();\n" % \ + (regtype, regid)) + elif (regid in {"d", "x", "y"}): + f.write(" const int %s%sN = insn->regno[%d];\n" % \ + (regtype, regid, regno)) + f.write(" const intptr_t %s%sV_off =\n" % \ + (regtype, regid)) + if (hex_common.is_tmp_result(tag)): + f.write(" ctx_tmp_vreg_off(ctx, %s%sN, 1, true);\n" % \ 
+ (regtype, regid)) + else: + f.write(" ctx_future_vreg_off(ctx, %s%sN," % \ + (regtype, regid)) + f.write(" 1, true);\n"); + if (not hex_common.skip_qemu_helper(tag)): + f.write(" TCGv_ptr %s%sV = tcg_temp_new_ptr();\n" % \ + (regtype, regid)) + f.write(" tcg_gen_addi_ptr(%s%sV, cpu_env, %s%sV_off);\n" % \ + (regtype, regid, regtype, regid)) + else: + print("Bad register parse: ", regtype, regid) + elif (regtype == "Q"): + if (regid in {"d", "e", "x"}): + f.write(" const int %s%sN = insn->regno[%d];\n" % \ + (regtype, regid, regno)) + f.write(" const intptr_t %s%sV_off =\n" % \ + (regtype, regid)) + f.write(" offsetof(CPUHexagonState,\n") + f.write(" future_QRegs[%s%sN]);\n" % \ + (regtype, regid)) + if (not hex_common.skip_qemu_helper(tag)): + f.write(" TCGv_ptr %s%sV = tcg_temp_new_ptr();\n" % \ + (regtype, regid)) + f.write(" tcg_gen_addi_ptr(%s%sV, cpu_env, %s%sV_off);\n" % \ + (regtype, regid, regtype, regid)) + elif (regid in {"s", "t", "u", "v"}): + f.write(" const int %s%sN = insn->regno[%d];\n" % \ + (regtype, regid, regno)) + f.write(" const intptr_t %s%sV_off =\n" %\ + (regtype, regid)) + f.write(" offsetof(CPUHexagonState, QRegs[%s%sN]);\n" % \ + (regtype, regid)) + if (not hex_common.skip_qemu_helper(tag)): + f.write(" TCGv_ptr %s%sV = tcg_temp_new_ptr();\n" % \ + (regtype, regid)) + else: + print("Bad register parse: ", regtype, regid) else: print("Bad register parse: ", regtype, regid) -def genptr_decl_new(f,regtype,regid,regno): +def genptr_decl_new(f, tag, regtype, regid, regno): if (regtype == "N"): if (regid in {"s", "t"}): f.write(" TCGv %s%sN = hex_new_value[insn->regno[%d]];\n" % \ @@ -135,6 +220,21 @@ def genptr_decl_new(f,regtype,regid,regno): (regtype, regid, regno)) else: print("Bad register parse: ", regtype, regid) + elif (regtype == "O"): + if (regid == "s"): + f.write(" const intptr_t %s%sN_num = insn->regno[%d];\n" % \ + (regtype, regid, regno)) + if (hex_common.skip_qemu_helper(tag)): + f.write(" const intptr_t %s%sN_off =\n" % \ + (regtype, regid)) + f.write(" ctx_future_vreg_off(ctx, %s%sN_num," % \ + (regtype, regid)) + f.write(" 1, true);\n") + else: + f.write(" TCGv %s%sN = tcg_constant_tl(%s%sN_num);\n" % \ + (regtype, regid, regtype, regid)) + else: + print("Bad register parse: ", regtype, regid) else: print("Bad register parse: ", regtype, regid) @@ -145,7 +245,7 @@ def genptr_decl_opn(f, tag, regtype, regid, toss, numregs, i): if hex_common.is_old_val(regtype, regid, tag): genptr_decl(f,tag, regtype, regid, i) elif hex_common.is_new_val(regtype, regid, tag): - genptr_decl_new(f,regtype,regid,i) + genptr_decl_new(f, tag, regtype, regid, i) else: print("Bad register parse: ",regtype,regid,toss,numregs) else: @@ -159,7 +259,7 @@ def genptr_decl_imm(f,immlett): f.write(" int %s = insn->immed[%d];\n" % \ (hex_common.imm_name(immlett), i)) -def genptr_free(f,regtype,regid,regno): +def genptr_free(f, tag, regtype, regid, regno): if (regtype == "R"): if (regid in {"dd", "ss", "tt", "xx", "yy"}): f.write(" tcg_temp_free_i64(%s%sV);\n" % (regtype, regid)) @@ -182,33 +282,51 @@ def genptr_free(f,regtype,regid,regno): elif (regtype == "M"): if (regid != "u"): print("Bad register parse: ", regtype, regid) + elif (regtype == "V"): + if (regid in {"dd", "uu", "vv", "xx", \ + "d", "s", "u", "v", "w", "x", "y"}): + if (not hex_common.skip_qemu_helper(tag)): + f.write(" tcg_temp_free_ptr(%s%sV);\n" % \ + (regtype, regid)) + else: + print("Bad register parse: ", regtype, regid) + elif (regtype == "Q"): + if (regid in {"d", "e", "s", "t", "u", "v", "x"}): + if (not 
hex_common.skip_qemu_helper(tag)): + f.write(" tcg_temp_free_ptr(%s%sV);\n" % \ + (regtype, regid)) + else: + print("Bad register parse: ", regtype, regid) else: print("Bad register parse: ", regtype, regid) -def genptr_free_new(f,regtype,regid,regno): +def genptr_free_new(f, tag, regtype, regid, regno): if (regtype == "N"): if (regid not in {"s", "t"}): print("Bad register parse: ", regtype, regid) elif (regtype == "P"): if (regid not in {"t", "u", "v"}): print("Bad register parse: ", regtype, regid) + elif (regtype == "O"): + if (regid != "s"): + print("Bad register parse: ", regtype, regid) else: print("Bad register parse: ", regtype, regid) def genptr_free_opn(f,regtype,regid,i,tag): if (hex_common.is_pair(regid)): - genptr_free(f,regtype,regid,i) + genptr_free(f, tag, regtype, regid, i) elif (hex_common.is_single(regid)): if hex_common.is_old_val(regtype, regid, tag): - genptr_free(f,regtype,regid,i) + genptr_free(f, tag, regtype, regid, i) elif hex_common.is_new_val(regtype, regid, tag): - genptr_free_new(f,regtype,regid,i) + genptr_free_new(f, tag, regtype, regid, i) else: print("Bad register parse: ",regtype,regid,toss,numregs) else: print("Bad register parse: ",regtype,regid,toss,numregs) -def genptr_src_read(f,regtype,regid): +def genptr_src_read(f, tag, regtype, regid): if (regtype == "R"): if (regid in {"ss", "tt", "xx", "yy"}): f.write(" tcg_gen_concat_i32_i64(%s%sV, hex_gpr[%s%sN],\n" % \ @@ -238,6 +356,47 @@ def genptr_src_read(f,regtype,regid): elif (regtype == "M"): if (regid != "u"): print("Bad register parse: ", regtype, regid) + elif (regtype == "V"): + if (regid in {"uu", "vv", "xx"}): + f.write(" tcg_gen_gvec_mov(MO_64, %s%sV_off,\n" % \ + (regtype, regid)) + f.write(" vreg_src_off(ctx, %s%sN),\n" % \ + (regtype, regid)) + f.write(" sizeof(MMVector), sizeof(MMVector));\n") + f.write(" tcg_gen_gvec_mov(MO_64,\n") + f.write(" %s%sV_off + sizeof(MMVector),\n" % \ + (regtype, regid)) + f.write(" vreg_src_off(ctx, %s%sN ^ 1),\n" % \ + (regtype, regid)) + f.write(" sizeof(MMVector), sizeof(MMVector));\n") + elif (regid in {"s", "u", "v", "w"}): + if (not hex_common.skip_qemu_helper(tag)): + f.write(" tcg_gen_addi_ptr(%s%sV, cpu_env, %s%sV_off);\n" % \ + (regtype, regid, regtype, regid)) + elif (regid in {"x", "y"}): + f.write(" tcg_gen_gvec_mov(MO_64, %s%sV_off,\n" % \ + (regtype, regid)) + f.write(" vreg_src_off(ctx, %s%sN),\n" % \ + (regtype, regid)) + f.write(" sizeof(MMVector), sizeof(MMVector));\n") + if (not hex_common.skip_qemu_helper(tag)): + f.write(" tcg_gen_addi_ptr(%s%sV, cpu_env, %s%sV_off);\n" % \ + (regtype, regid, regtype, regid)) + else: + print("Bad register parse: ", regtype, regid) + elif (regtype == "Q"): + if (regid in {"s", "t", "u", "v"}): + if (not hex_common.skip_qemu_helper(tag)): + f.write(" tcg_gen_addi_ptr(%s%sV, cpu_env, %s%sV_off);\n" % \ + (regtype, regid, regtype, regid)) + elif (regid in {"x"}): + f.write(" tcg_gen_gvec_mov(MO_64, %s%sV_off,\n" % \ + (regtype, regid)) + f.write(" offsetof(CPUHexagonState, QRegs[%s%sN]),\n" % \ + (regtype, regid)) + f.write(" sizeof(MMQReg), sizeof(MMQReg));\n") + else: + print("Bad register parse: ", regtype, regid) else: print("Bad register parse: ", regtype, regid) @@ -248,15 +407,18 @@ def genptr_src_read_new(f,regtype,regid): elif (regtype == "P"): if (regid not in {"t", "u", "v"}): print("Bad register parse: ", regtype, regid) + elif (regtype == "O"): + if (regid != "s"): + print("Bad register parse: ", regtype, regid) else: print("Bad register parse: ", regtype, regid) def 
genptr_src_read_opn(f,regtype,regid,tag): if (hex_common.is_pair(regid)): - genptr_src_read(f,regtype,regid) + genptr_src_read(f, tag, regtype, regid) elif (hex_common.is_single(regid)): if hex_common.is_old_val(regtype, regid, tag): - genptr_src_read(f,regtype,regid) + genptr_src_read(f, tag, regtype, regid) elif hex_common.is_new_val(regtype, regid, tag): genptr_src_read_new(f,regtype,regid) else: @@ -279,15 +441,12 @@ def gen_helper_call_opn(f, tag, regtype, regid, toss, numregs, i): print("Bad register parse: ",regtype,regid,toss,numregs) def gen_helper_decl_imm(f,immlett): - f.write(" TCGv tcgv_%s = tcg_const_tl(%s);\n" % \ + f.write(" TCGv tcgv_%s = tcg_constant_tl(%s);\n" % \ (hex_common.imm_name(immlett), hex_common.imm_name(immlett))) def gen_helper_call_imm(f,immlett): f.write(", tcgv_%s" % hex_common.imm_name(immlett)) -def gen_helper_free_imm(f,immlett): - f.write(" tcg_temp_free(tcgv_%s);\n" % hex_common.imm_name(immlett)) - def genptr_dst_write_pair(f, tag, regtype, regid): if ('A_CONDEXEC' in hex_common.attribdict[tag]): f.write(" gen_log_predicated_reg_write_pair(%s%sN, %s%sV, insn->slot);\n" % \ @@ -334,11 +493,68 @@ def genptr_dst_write(f, tag, regtype, regid): else: print("Bad register parse: ", regtype, regid) +def genptr_dst_write_ext(f, tag, regtype, regid, newv="EXT_DFL"): + if (regtype == "V"): + if (regid in {"dd", "xx", "yy"}): + if ('A_CONDEXEC' in hex_common.attribdict[tag]): + is_predicated = "true" + else: + is_predicated = "false" + f.write(" gen_log_vreg_write_pair(ctx, %s%sV_off, %s%sN, " % \ + (regtype, regid, regtype, regid)) + f.write("%s, insn->slot, %s);\n" % \ + (newv, is_predicated)) + f.write(" ctx_log_vreg_write_pair(ctx, %s%sN, %s,\n" % \ + (regtype, regid, newv)) + f.write(" %s);\n" % (is_predicated)) + elif (regid in {"d", "x", "y"}): + if ('A_CONDEXEC' in hex_common.attribdict[tag]): + is_predicated = "true" + else: + is_predicated = "false" + f.write(" gen_log_vreg_write(ctx, %s%sV_off, %s%sN, %s, " % \ + (regtype, regid, regtype, regid, newv)) + f.write("insn->slot, %s);\n" % \ + (is_predicated)) + f.write(" ctx_log_vreg_write(ctx, %s%sN, %s, %s);\n" % \ + (regtype, regid, newv, is_predicated)) + else: + print("Bad register parse: ", regtype, regid) + elif (regtype == "Q"): + if (regid in {"d", "e", "x"}): + if ('A_CONDEXEC' in hex_common.attribdict[tag]): + is_predicated = "true" + else: + is_predicated = "false" + f.write(" gen_log_qreg_write(%s%sV_off, %s%sN, %s, " % \ + (regtype, regid, regtype, regid, newv)) + f.write("insn->slot, %s);\n" % (is_predicated)) + f.write(" ctx_log_qreg_write(ctx, %s%sN, %s);\n" % \ + (regtype, regid, is_predicated)) + else: + print("Bad register parse: ", regtype, regid) + else: + print("Bad register parse: ", regtype, regid) + def genptr_dst_write_opn(f,regtype, regid, tag): if (hex_common.is_pair(regid)): - genptr_dst_write(f, tag, regtype, regid) + if (hex_common.is_hvx_reg(regtype)): + if (hex_common.is_tmp_result(tag)): + genptr_dst_write_ext(f, tag, regtype, regid, "EXT_TMP") + else: + genptr_dst_write_ext(f, tag, regtype, regid) + else: + genptr_dst_write(f, tag, regtype, regid) elif (hex_common.is_single(regid)): - genptr_dst_write(f, tag, regtype, regid) + if (hex_common.is_hvx_reg(regtype)): + if (hex_common.is_new_result(tag)): + genptr_dst_write_ext(f, tag, regtype, regid, "EXT_NEW") + elif (hex_common.is_tmp_result(tag)): + genptr_dst_write_ext(f, tag, regtype, regid, "EXT_TMP") + else: + genptr_dst_write_ext(f, tag, regtype, regid, "EXT_DFL") + else: + genptr_dst_write(f, tag, regtype, regid)
else: print("Bad register parse: ",regtype,regid,toss,numregs) @@ -401,21 +617,32 @@ def gen_tcg_func(f, tag, regs, imms): for immlett,bits,immshift in imms: gen_helper_decl_imm(f,immlett) if hex_common.need_part1(tag): - f.write(" TCGv part1 = tcg_const_tl(insn->part1);\n") + f.write(" TCGv part1 = tcg_constant_tl(insn->part1);\n") if hex_common.need_slot(tag): - f.write(" TCGv slot = tcg_const_tl(insn->slot);\n") + f.write(" TCGv slot = tcg_constant_tl(insn->slot);\n") f.write(" gen_helper_%s(" % (tag)) i=0 ## If there is a scalar result, it is the return type for regtype,regid,toss,numregs in regs: if (hex_common.is_written(regid)): + if (hex_common.is_hvx_reg(regtype)): + continue gen_helper_call_opn(f, tag, regtype, regid, toss, numregs, i) i += 1 if (i > 0): f.write(", ") f.write("cpu_env") i=1 + for regtype,regid,toss,numregs in regs: + if (hex_common.is_written(regid)): + if (not hex_common.is_hvx_reg(regtype)): + continue + gen_helper_call_opn(f, tag, regtype, regid, toss, numregs, i) + i += 1 for regtype,regid,toss,numregs in regs: if (hex_common.is_read(regid)): + if (hex_common.is_hvx_reg(regtype) and + hex_common.is_readwrite(regid)): + continue gen_helper_call_opn(f, tag, regtype, regid, toss, numregs, i) i += 1 for immlett,bits,immshift in imms: @@ -424,12 +651,6 @@ def gen_tcg_func(f, tag, regs, imms): if hex_common.need_slot(tag): f.write(", slot") if hex_common.need_part1(tag): f.write(", part1" ) f.write(");\n") - if hex_common.need_slot(tag): - f.write(" tcg_temp_free(slot);\n") - if hex_common.need_part1(tag): - f.write(" tcg_temp_free(part1);\n") - for immlett,bits,immshift in imms: - gen_helper_free_imm(f,immlett) ## Write all the outputs for regtype,regid,toss,numregs in regs: @@ -454,11 +675,12 @@ def main(): hex_common.read_semantics_file(sys.argv[1]) hex_common.read_attribs_file(sys.argv[2]) hex_common.read_overrides_file(sys.argv[3]) + hex_common.read_overrides_file(sys.argv[4]) hex_common.calculate_attribs() tagregs = hex_common.get_tagregs() tagimms = hex_common.get_tagimms() - with open(sys.argv[4], 'w') as f: + with open(sys.argv[5], 'w') as f: f.write("#ifndef HEXAGON_TCG_FUNCS_H\n") f.write("#define HEXAGON_TCG_FUNCS_H\n\n") diff --git a/target/hexagon/gen_tcg_hvx.h b/target/hexagon/gen_tcg_hvx.h new file mode 100644 index 0000000000..cdcc9382bb --- /dev/null +++ b/target/hexagon/gen_tcg_hvx.h @@ -0,0 +1,903 @@ +/* + * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ + +#ifndef HEXAGON_GEN_TCG_HVX_H +#define HEXAGON_GEN_TCG_HVX_H + +/* + * Histogram instructions + * + * Note that these instructions operate directly on the vector registers + * and therefore happen after commit. 
+ * + * The generate_<tag> function is called twice + * The first time is during the normal TCG generation + * ctx->pre_commit is true + * In the masked cases, we save the mask to the qtmp temporary + * Otherwise, there is nothing to do + * The second call is at the end of gen_commit_packet + * ctx->pre_commit is false + * Generate the call to the helper + */ + +static inline void assert_vhist_tmp(DisasContext *ctx) +{ + /* vhist instructions require exactly one .tmp to be defined */ + g_assert(ctx->tmp_vregs_idx == 1); +} + +#define fGEN_TCG_V6_vhist(SHORTCODE) \ + if (!ctx->pre_commit) { \ + assert_vhist_tmp(ctx); \ + gen_helper_vhist(cpu_env); \ + } +#define fGEN_TCG_V6_vhistq(SHORTCODE) \ + do { \ + if (ctx->pre_commit) { \ + intptr_t dstoff = offsetof(CPUHexagonState, qtmp); \ + tcg_gen_gvec_mov(MO_64, dstoff, QvV_off, \ + sizeof(MMVector), sizeof(MMVector)); \ + } else { \ + assert_vhist_tmp(ctx); \ + gen_helper_vhistq(cpu_env); \ + } \ + } while (0) +#define fGEN_TCG_V6_vwhist256(SHORTCODE) \ + if (!ctx->pre_commit) { \ + assert_vhist_tmp(ctx); \ + gen_helper_vwhist256(cpu_env); \ + } +#define fGEN_TCG_V6_vwhist256q(SHORTCODE) \ + do { \ + if (ctx->pre_commit) { \ + intptr_t dstoff = offsetof(CPUHexagonState, qtmp); \ + tcg_gen_gvec_mov(MO_64, dstoff, QvV_off, \ + sizeof(MMVector), sizeof(MMVector)); \ + } else { \ + assert_vhist_tmp(ctx); \ + gen_helper_vwhist256q(cpu_env); \ + } \ + } while (0) +#define fGEN_TCG_V6_vwhist256_sat(SHORTCODE) \ + if (!ctx->pre_commit) { \ + assert_vhist_tmp(ctx); \ + gen_helper_vwhist256_sat(cpu_env); \ + } +#define fGEN_TCG_V6_vwhist256q_sat(SHORTCODE) \ + do { \ + if (ctx->pre_commit) { \ + intptr_t dstoff = offsetof(CPUHexagonState, qtmp); \ + tcg_gen_gvec_mov(MO_64, dstoff, QvV_off, \ + sizeof(MMVector), sizeof(MMVector)); \ + } else { \ + assert_vhist_tmp(ctx); \ + gen_helper_vwhist256q_sat(cpu_env); \ + } \ + } while (0) +#define fGEN_TCG_V6_vwhist128(SHORTCODE) \ + if (!ctx->pre_commit) { \ + assert_vhist_tmp(ctx); \ + gen_helper_vwhist128(cpu_env); \ + } +#define fGEN_TCG_V6_vwhist128q(SHORTCODE) \ + do { \ + if (ctx->pre_commit) { \ + intptr_t dstoff = offsetof(CPUHexagonState, qtmp); \ + tcg_gen_gvec_mov(MO_64, dstoff, QvV_off, \ + sizeof(MMVector), sizeof(MMVector)); \ + } else { \ + assert_vhist_tmp(ctx); \ + gen_helper_vwhist128q(cpu_env); \ + } \ + } while (0) +#define fGEN_TCG_V6_vwhist128m(SHORTCODE) \ + if (!ctx->pre_commit) { \ + TCGv tcgv_uiV = tcg_constant_tl(uiV); \ + assert_vhist_tmp(ctx); \ + gen_helper_vwhist128m(cpu_env, tcgv_uiV); \ + } +#define fGEN_TCG_V6_vwhist128qm(SHORTCODE) \ + do { \ + if (ctx->pre_commit) { \ + intptr_t dstoff = offsetof(CPUHexagonState, qtmp); \ + tcg_gen_gvec_mov(MO_64, dstoff, QvV_off, \ + sizeof(MMVector), sizeof(MMVector)); \ + } else { \ + TCGv tcgv_uiV = tcg_constant_tl(uiV); \ + assert_vhist_tmp(ctx); \ + gen_helper_vwhist128qm(cpu_env, tcgv_uiV); \ + } \ + } while (0) + + +#define fGEN_TCG_V6_vassign(SHORTCODE) \ + tcg_gen_gvec_mov(MO_64, VdV_off, VuV_off, \ + sizeof(MMVector), sizeof(MMVector)) + +/* Vector conditional move */ +#define fGEN_TCG_VEC_CMOV(PRED) \ + do { \ + TCGv lsb = tcg_temp_new(); \ + TCGLabel *false_label = gen_new_label(); \ + TCGLabel *end_label = gen_new_label(); \ + tcg_gen_andi_tl(lsb, PsV, 1); \ + tcg_gen_brcondi_tl(TCG_COND_NE, lsb, PRED, false_label); \ + tcg_temp_free(lsb); \ + tcg_gen_gvec_mov(MO_64, VdV_off, VuV_off, \ + sizeof(MMVector), sizeof(MMVector)); \ + tcg_gen_br(end_label); \ + gen_set_label(false_label); \ + tcg_gen_ori_tl(hex_slot_cancelled,
hex_slot_cancelled, \ + 1 << insn->slot); \ + gen_set_label(end_label); \ + } while (0) + + +/* Vector conditional move (true) */ +#define fGEN_TCG_V6_vcmov(SHORTCODE) \ + fGEN_TCG_VEC_CMOV(1) + +/* Vector conditional move (false) */ +#define fGEN_TCG_V6_vncmov(SHORTCODE) \ + fGEN_TCG_VEC_CMOV(0) + +/* Vector add - various forms */ +#define fGEN_TCG_V6_vaddb(SHORTCODE) \ + tcg_gen_gvec_add(MO_8, VdV_off, VuV_off, VvV_off, \ + sizeof(MMVector), sizeof(MMVector)) + +#define fGEN_TCG_V6_vaddh(SHORTCODE) \ + tcg_gen_gvec_add(MO_16, VdV_off, VuV_off, VvV_off, \ + sizeof(MMVector), sizeof(MMVector)) + +#define fGEN_TCG_V6_vaddw(SHORTCODE) \ + tcg_gen_gvec_add(MO_32, VdV_off, VuV_off, VvV_off, \ + sizeof(MMVector), sizeof(MMVector)) + +#define fGEN_TCG_V6_vaddb_dv(SHORTCODE) \ + tcg_gen_gvec_add(MO_8, VddV_off, VuuV_off, VvvV_off, \ + sizeof(MMVector) * 2, sizeof(MMVector) * 2) + +#define fGEN_TCG_V6_vaddh_dv(SHORTCODE) \ + tcg_gen_gvec_add(MO_16, VddV_off, VuuV_off, VvvV_off, \ + sizeof(MMVector) * 2, sizeof(MMVector) * 2) + +#define fGEN_TCG_V6_vaddw_dv(SHORTCODE) \ + tcg_gen_gvec_add(MO_32, VddV_off, VuuV_off, VvvV_off, \ + sizeof(MMVector) * 2, sizeof(MMVector) * 2) + +/* Vector sub - various forms */ +#define fGEN_TCG_V6_vsubb(SHORTCODE) \ + tcg_gen_gvec_sub(MO_8, VdV_off, VuV_off, VvV_off, \ + sizeof(MMVector), sizeof(MMVector)) + +#define fGEN_TCG_V6_vsubh(SHORTCODE) \ + tcg_gen_gvec_sub(MO_16, VdV_off, VuV_off, VvV_off, \ + sizeof(MMVector), sizeof(MMVector)) + +#define fGEN_TCG_V6_vsubw(SHORTCODE) \ + tcg_gen_gvec_sub(MO_32, VdV_off, VuV_off, VvV_off, \ + sizeof(MMVector), sizeof(MMVector)) + +#define fGEN_TCG_V6_vsubb_dv(SHORTCODE) \ + tcg_gen_gvec_sub(MO_8, VddV_off, VuuV_off, VvvV_off, \ + sizeof(MMVector) * 2, sizeof(MMVector) * 2) + +#define fGEN_TCG_V6_vsubh_dv(SHORTCODE) \ + tcg_gen_gvec_sub(MO_16, VddV_off, VuuV_off, VvvV_off, \ + sizeof(MMVector) * 2, sizeof(MMVector) * 2) + +#define fGEN_TCG_V6_vsubw_dv(SHORTCODE) \ + tcg_gen_gvec_sub(MO_32, VddV_off, VuuV_off, VvvV_off, \ + sizeof(MMVector) * 2, sizeof(MMVector) * 2) + +/* Vector shift right - various forms */ +#define fGEN_TCG_V6_vasrh(SHORTCODE) \ + do { \ + TCGv shift = tcg_temp_new(); \ + tcg_gen_andi_tl(shift, RtV, 15); \ + tcg_gen_gvec_sars(MO_16, VdV_off, VuV_off, shift, \ + sizeof(MMVector), sizeof(MMVector)); \ + tcg_temp_free(shift); \ + } while (0) + +#define fGEN_TCG_V6_vasrh_acc(SHORTCODE) \ + do { \ + intptr_t tmpoff = offsetof(CPUHexagonState, vtmp); \ + TCGv shift = tcg_temp_new(); \ + tcg_gen_andi_tl(shift, RtV, 15); \ + tcg_gen_gvec_sars(MO_16, tmpoff, VuV_off, shift, \ + sizeof(MMVector), sizeof(MMVector)); \ + tcg_gen_gvec_add(MO_16, VxV_off, VxV_off, tmpoff, \ + sizeof(MMVector), sizeof(MMVector)); \ + tcg_temp_free(shift); \ + } while (0) + +#define fGEN_TCG_V6_vasrw(SHORTCODE) \ + do { \ + TCGv shift = tcg_temp_new(); \ + tcg_gen_andi_tl(shift, RtV, 31); \ + tcg_gen_gvec_sars(MO_32, VdV_off, VuV_off, shift, \ + sizeof(MMVector), sizeof(MMVector)); \ + tcg_temp_free(shift); \ + } while (0) + +#define fGEN_TCG_V6_vasrw_acc(SHORTCODE) \ + do { \ + intptr_t tmpoff = offsetof(CPUHexagonState, vtmp); \ + TCGv shift = tcg_temp_new(); \ + tcg_gen_andi_tl(shift, RtV, 31); \ + tcg_gen_gvec_sars(MO_32, tmpoff, VuV_off, shift, \ + sizeof(MMVector), sizeof(MMVector)); \ + tcg_gen_gvec_add(MO_32, VxV_off, VxV_off, tmpoff, \ + sizeof(MMVector), sizeof(MMVector)); \ + tcg_temp_free(shift); \ + } while (0) + +#define fGEN_TCG_V6_vlsrb(SHORTCODE) \ + do { \ + TCGv shift = tcg_temp_new(); \ + tcg_gen_andi_tl(shift, RtV,
7); \ + tcg_gen_gvec_shrs(MO_8, VdV_off, VuV_off, shift, \ + sizeof(MMVector), sizeof(MMVector)); \ + tcg_temp_free(shift); \ + } while (0) + +#define fGEN_TCG_V6_vlsrh(SHORTCODE) \ + do { \ + TCGv shift = tcg_temp_new(); \ + tcg_gen_andi_tl(shift, RtV, 15); \ + tcg_gen_gvec_shrs(MO_16, VdV_off, VuV_off, shift, \ + sizeof(MMVector), sizeof(MMVector)); \ + tcg_temp_free(shift); \ + } while (0) + +#define fGEN_TCG_V6_vlsrw(SHORTCODE) \ + do { \ + TCGv shift = tcg_temp_new(); \ + tcg_gen_andi_tl(shift, RtV, 31); \ + tcg_gen_gvec_shrs(MO_32, VdV_off, VuV_off, shift, \ + sizeof(MMVector), sizeof(MMVector)); \ + tcg_temp_free(shift); \ + } while (0) + +/* Vector shift left - various forms */ +#define fGEN_TCG_V6_vaslb(SHORTCODE) \ + do { \ + TCGv shift = tcg_temp_new(); \ + tcg_gen_andi_tl(shift, RtV, 7); \ + tcg_gen_gvec_shls(MO_8, VdV_off, VuV_off, shift, \ + sizeof(MMVector), sizeof(MMVector)); \ + tcg_temp_free(shift); \ + } while (0) + +#define fGEN_TCG_V6_vaslh(SHORTCODE) \ + do { \ + TCGv shift = tcg_temp_new(); \ + tcg_gen_andi_tl(shift, RtV, 15); \ + tcg_gen_gvec_shls(MO_16, VdV_off, VuV_off, shift, \ + sizeof(MMVector), sizeof(MMVector)); \ + tcg_temp_free(shift); \ + } while (0) + +#define fGEN_TCG_V6_vaslh_acc(SHORTCODE) \ + do { \ + intptr_t tmpoff = offsetof(CPUHexagonState, vtmp); \ + TCGv shift = tcg_temp_new(); \ + tcg_gen_andi_tl(shift, RtV, 15); \ + tcg_gen_gvec_shls(MO_16, tmpoff, VuV_off, shift, \ + sizeof(MMVector), sizeof(MMVector)); \ + tcg_gen_gvec_add(MO_16, VxV_off, VxV_off, tmpoff, \ + sizeof(MMVector), sizeof(MMVector)); \ + tcg_temp_free(shift); \ + } while (0) + +#define fGEN_TCG_V6_vaslw(SHORTCODE) \ + do { \ + TCGv shift = tcg_temp_new(); \ + tcg_gen_andi_tl(shift, RtV, 31); \ + tcg_gen_gvec_shls(MO_32, VdV_off, VuV_off, shift, \ + sizeof(MMVector), sizeof(MMVector)); \ + tcg_temp_free(shift); \ + } while (0) + +#define fGEN_TCG_V6_vaslw_acc(SHORTCODE) \ + do { \ + intptr_t tmpoff = offsetof(CPUHexagonState, vtmp); \ + TCGv shift = tcg_temp_new(); \ + tcg_gen_andi_tl(shift, RtV, 31); \ + tcg_gen_gvec_shls(MO_32, tmpoff, VuV_off, shift, \ + sizeof(MMVector), sizeof(MMVector)); \ + tcg_gen_gvec_add(MO_32, VxV_off, VxV_off, tmpoff, \ + sizeof(MMVector), sizeof(MMVector)); \ + tcg_temp_free(shift); \ + } while (0) + +/* Vector max - various forms */ +#define fGEN_TCG_V6_vmaxw(SHORTCODE) \ + tcg_gen_gvec_smax(MO_32, VdV_off, VuV_off, VvV_off, \ + sizeof(MMVector), sizeof(MMVector)) +#define fGEN_TCG_V6_vmaxh(SHORTCODE) \ + tcg_gen_gvec_smax(MO_16, VdV_off, VuV_off, VvV_off, \ + sizeof(MMVector), sizeof(MMVector)) +#define fGEN_TCG_V6_vmaxuh(SHORTCODE) \ + tcg_gen_gvec_umax(MO_16, VdV_off, VuV_off, VvV_off, \ + sizeof(MMVector), sizeof(MMVector)) +#define fGEN_TCG_V6_vmaxb(SHORTCODE) \ + tcg_gen_gvec_smax(MO_8, VdV_off, VuV_off, VvV_off, \ + sizeof(MMVector), sizeof(MMVector)) +#define fGEN_TCG_V6_vmaxub(SHORTCODE) \ + tcg_gen_gvec_umax(MO_8, VdV_off, VuV_off, VvV_off, \ + sizeof(MMVector), sizeof(MMVector)) + +/* Vector min - various forms */ +#define fGEN_TCG_V6_vminw(SHORTCODE) \ + tcg_gen_gvec_smin(MO_32, VdV_off, VuV_off, VvV_off, \ + sizeof(MMVector), sizeof(MMVector)) +#define fGEN_TCG_V6_vminh(SHORTCODE) \ + tcg_gen_gvec_smin(MO_16, VdV_off, VuV_off, VvV_off, \ + sizeof(MMVector), sizeof(MMVector)) +#define fGEN_TCG_V6_vminuh(SHORTCODE) \ + tcg_gen_gvec_umin(MO_16, VdV_off, VuV_off, VvV_off, \ + sizeof(MMVector), sizeof(MMVector)) +#define fGEN_TCG_V6_vminb(SHORTCODE) \ + tcg_gen_gvec_smin(MO_8, VdV_off, VuV_off, VvV_off, \ + sizeof(MMVector), sizeof(MMVector)) 
+#define fGEN_TCG_V6_vminub(SHORTCODE) \ + tcg_gen_gvec_umin(MO_8, VdV_off, VuV_off, VvV_off, \ + sizeof(MMVector), sizeof(MMVector)) + +/* Vector logical ops */ +#define fGEN_TCG_V6_vxor(SHORTCODE) \ + tcg_gen_gvec_xor(MO_64, VdV_off, VuV_off, VvV_off, \ + sizeof(MMVector), sizeof(MMVector)) + +#define fGEN_TCG_V6_vand(SHORTCODE) \ + tcg_gen_gvec_and(MO_64, VdV_off, VuV_off, VvV_off, \ + sizeof(MMVector), sizeof(MMVector)) + +#define fGEN_TCG_V6_vor(SHORTCODE) \ + tcg_gen_gvec_or(MO_64, VdV_off, VuV_off, VvV_off, \ + sizeof(MMVector), sizeof(MMVector)) + +#define fGEN_TCG_V6_vnot(SHORTCODE) \ + tcg_gen_gvec_not(MO_64, VdV_off, VuV_off, \ + sizeof(MMVector), sizeof(MMVector)) + +/* Q register logical ops */ +#define fGEN_TCG_V6_pred_or(SHORTCODE) \ + tcg_gen_gvec_or(MO_64, QdV_off, QsV_off, QtV_off, \ + sizeof(MMQReg), sizeof(MMQReg)) + +#define fGEN_TCG_V6_pred_and(SHORTCODE) \ + tcg_gen_gvec_and(MO_64, QdV_off, QsV_off, QtV_off, \ + sizeof(MMQReg), sizeof(MMQReg)) + +#define fGEN_TCG_V6_pred_xor(SHORTCODE) \ + tcg_gen_gvec_xor(MO_64, QdV_off, QsV_off, QtV_off, \ + sizeof(MMQReg), sizeof(MMQReg)) + +#define fGEN_TCG_V6_pred_or_n(SHORTCODE) \ + tcg_gen_gvec_orc(MO_64, QdV_off, QsV_off, QtV_off, \ + sizeof(MMQReg), sizeof(MMQReg)) + +#define fGEN_TCG_V6_pred_and_n(SHORTCODE) \ + tcg_gen_gvec_andc(MO_64, QdV_off, QsV_off, QtV_off, \ + sizeof(MMQReg), sizeof(MMQReg)) + +#define fGEN_TCG_V6_pred_not(SHORTCODE) \ + tcg_gen_gvec_not(MO_64, QdV_off, QsV_off, \ + sizeof(MMQReg), sizeof(MMQReg)) + +/* Vector compares */ +#define fGEN_TCG_VEC_CMP(COND, TYPE, SIZE) \ + do { \ + intptr_t tmpoff = offsetof(CPUHexagonState, vtmp); \ + tcg_gen_gvec_cmp(COND, TYPE, tmpoff, VuV_off, VvV_off, \ + sizeof(MMVector), sizeof(MMVector)); \ + vec_to_qvec(SIZE, QdV_off, tmpoff); \ + } while (0) + +#define fGEN_TCG_V6_vgtw(SHORTCODE) \ + fGEN_TCG_VEC_CMP(TCG_COND_GT, MO_32, 4) +#define fGEN_TCG_V6_vgth(SHORTCODE) \ + fGEN_TCG_VEC_CMP(TCG_COND_GT, MO_16, 2) +#define fGEN_TCG_V6_vgtb(SHORTCODE) \ + fGEN_TCG_VEC_CMP(TCG_COND_GT, MO_8, 1) + +#define fGEN_TCG_V6_vgtuw(SHORTCODE) \ + fGEN_TCG_VEC_CMP(TCG_COND_GTU, MO_32, 4) +#define fGEN_TCG_V6_vgtuh(SHORTCODE) \ + fGEN_TCG_VEC_CMP(TCG_COND_GTU, MO_16, 2) +#define fGEN_TCG_V6_vgtub(SHORTCODE) \ + fGEN_TCG_VEC_CMP(TCG_COND_GTU, MO_8, 1) + +#define fGEN_TCG_V6_veqw(SHORTCODE) \ + fGEN_TCG_VEC_CMP(TCG_COND_EQ, MO_32, 4) +#define fGEN_TCG_V6_veqh(SHORTCODE) \ + fGEN_TCG_VEC_CMP(TCG_COND_EQ, MO_16, 2) +#define fGEN_TCG_V6_veqb(SHORTCODE) \ + fGEN_TCG_VEC_CMP(TCG_COND_EQ, MO_8, 1) + +#define fGEN_TCG_VEC_CMP_OP(COND, TYPE, SIZE, OP) \ + do { \ + intptr_t tmpoff = offsetof(CPUHexagonState, vtmp); \ + intptr_t qoff = offsetof(CPUHexagonState, qtmp); \ + tcg_gen_gvec_cmp(COND, TYPE, tmpoff, VuV_off, VvV_off, \ + sizeof(MMVector), sizeof(MMVector)); \ + vec_to_qvec(SIZE, qoff, tmpoff); \ + OP(MO_64, QxV_off, QxV_off, qoff, sizeof(MMQReg), sizeof(MMQReg)); \ + } while (0) + +#define fGEN_TCG_V6_vgtw_and(SHORTCODE) \ + fGEN_TCG_VEC_CMP_OP(TCG_COND_GT, MO_32, 4, tcg_gen_gvec_and) +#define fGEN_TCG_V6_vgtw_or(SHORTCODE) \ + fGEN_TCG_VEC_CMP_OP(TCG_COND_GT, MO_32, 4, tcg_gen_gvec_or) +#define fGEN_TCG_V6_vgtw_xor(SHORTCODE) \ + fGEN_TCG_VEC_CMP_OP(TCG_COND_GT, MO_32, 4, tcg_gen_gvec_xor) + +#define fGEN_TCG_V6_vgtuw_and(SHORTCODE) \ + fGEN_TCG_VEC_CMP_OP(TCG_COND_GTU, MO_32, 4, tcg_gen_gvec_and) +#define fGEN_TCG_V6_vgtuw_or(SHORTCODE) \ + fGEN_TCG_VEC_CMP_OP(TCG_COND_GTU, MO_32, 4, tcg_gen_gvec_or) +#define fGEN_TCG_V6_vgtuw_xor(SHORTCODE) \ + fGEN_TCG_VEC_CMP_OP(TCG_COND_GTU, MO_32, 
4, tcg_gen_gvec_xor) + +#define fGEN_TCG_V6_vgth_and(SHORTCODE) \ + fGEN_TCG_VEC_CMP_OP(TCG_COND_GT, MO_16, 2, tcg_gen_gvec_and) +#define fGEN_TCG_V6_vgth_or(SHORTCODE) \ + fGEN_TCG_VEC_CMP_OP(TCG_COND_GT, MO_16, 2, tcg_gen_gvec_or) +#define fGEN_TCG_V6_vgth_xor(SHORTCODE) \ + fGEN_TCG_VEC_CMP_OP(TCG_COND_GT, MO_16, 2, tcg_gen_gvec_xor) + +#define fGEN_TCG_V6_vgtuh_and(SHORTCODE) \ + fGEN_TCG_VEC_CMP_OP(TCG_COND_GTU, MO_16, 2, tcg_gen_gvec_and) +#define fGEN_TCG_V6_vgtuh_or(SHORTCODE) \ + fGEN_TCG_VEC_CMP_OP(TCG_COND_GTU, MO_16, 2, tcg_gen_gvec_or) +#define fGEN_TCG_V6_vgtuh_xor(SHORTCODE) \ + fGEN_TCG_VEC_CMP_OP(TCG_COND_GTU, MO_16, 2, tcg_gen_gvec_xor) + +#define fGEN_TCG_V6_vgtb_and(SHORTCODE) \ + fGEN_TCG_VEC_CMP_OP(TCG_COND_GT, MO_8, 1, tcg_gen_gvec_and) +#define fGEN_TCG_V6_vgtb_or(SHORTCODE) \ + fGEN_TCG_VEC_CMP_OP(TCG_COND_GT, MO_8, 1, tcg_gen_gvec_or) +#define fGEN_TCG_V6_vgtb_xor(SHORTCODE) \ + fGEN_TCG_VEC_CMP_OP(TCG_COND_GT, MO_8, 1, tcg_gen_gvec_xor) + +#define fGEN_TCG_V6_vgtub_and(SHORTCODE) \ + fGEN_TCG_VEC_CMP_OP(TCG_COND_GTU, MO_8, 1, tcg_gen_gvec_and) +#define fGEN_TCG_V6_vgtub_or(SHORTCODE) \ + fGEN_TCG_VEC_CMP_OP(TCG_COND_GTU, MO_8, 1, tcg_gen_gvec_or) +#define fGEN_TCG_V6_vgtub_xor(SHORTCODE) \ + fGEN_TCG_VEC_CMP_OP(TCG_COND_GTU, MO_8, 1, tcg_gen_gvec_xor) + +#define fGEN_TCG_V6_veqw_and(SHORTCODE) \ + fGEN_TCG_VEC_CMP_OP(TCG_COND_EQ, MO_32, 4, tcg_gen_gvec_and) +#define fGEN_TCG_V6_veqw_or(SHORTCODE) \ + fGEN_TCG_VEC_CMP_OP(TCG_COND_EQ, MO_32, 4, tcg_gen_gvec_or) +#define fGEN_TCG_V6_veqw_xor(SHORTCODE) \ + fGEN_TCG_VEC_CMP_OP(TCG_COND_EQ, MO_32, 4, tcg_gen_gvec_xor) + +#define fGEN_TCG_V6_veqh_and(SHORTCODE) \ + fGEN_TCG_VEC_CMP_OP(TCG_COND_EQ, MO_16, 2, tcg_gen_gvec_and) +#define fGEN_TCG_V6_veqh_or(SHORTCODE) \ + fGEN_TCG_VEC_CMP_OP(TCG_COND_EQ, MO_16, 2, tcg_gen_gvec_or) +#define fGEN_TCG_V6_veqh_xor(SHORTCODE) \ + fGEN_TCG_VEC_CMP_OP(TCG_COND_EQ, MO_16, 2, tcg_gen_gvec_xor) + +#define fGEN_TCG_V6_veqb_and(SHORTCODE) \ + fGEN_TCG_VEC_CMP_OP(TCG_COND_EQ, MO_8, 1, tcg_gen_gvec_and) +#define fGEN_TCG_V6_veqb_or(SHORTCODE) \ + fGEN_TCG_VEC_CMP_OP(TCG_COND_EQ, MO_8, 1, tcg_gen_gvec_or) +#define fGEN_TCG_V6_veqb_xor(SHORTCODE) \ + fGEN_TCG_VEC_CMP_OP(TCG_COND_EQ, MO_8, 1, tcg_gen_gvec_xor) + +/* Vector splat - various forms */ +#define fGEN_TCG_V6_lvsplatw(SHORTCODE) \ + tcg_gen_gvec_dup_i32(MO_32, VdV_off, \ + sizeof(MMVector), sizeof(MMVector), RtV) + +#define fGEN_TCG_V6_lvsplath(SHORTCODE) \ + tcg_gen_gvec_dup_i32(MO_16, VdV_off, \ + sizeof(MMVector), sizeof(MMVector), RtV) + +#define fGEN_TCG_V6_lvsplatb(SHORTCODE) \ + tcg_gen_gvec_dup_i32(MO_8, VdV_off, \ + sizeof(MMVector), sizeof(MMVector), RtV) + +/* Vector absolute value - various forms */ +#define fGEN_TCG_V6_vabsb(SHORTCODE) \ + tcg_gen_gvec_abs(MO_8, VdV_off, VuV_off, \ + sizeof(MMVector), sizeof(MMVector)) + +#define fGEN_TCG_V6_vabsh(SHORTCODE) \ + tcg_gen_gvec_abs(MO_16, VdV_off, VuV_off, \ + sizeof(MMVector), sizeof(MMVector)) + +#define fGEN_TCG_V6_vabsw(SHORTCODE) \ + tcg_gen_gvec_abs(MO_32, VdV_off, VuV_off, \ + sizeof(MMVector), sizeof(MMVector)) + +/* Vector loads */ +#define fGEN_TCG_V6_vL32b_pi(SHORTCODE) SHORTCODE +#define fGEN_TCG_V6_vL32Ub_pi(SHORTCODE) SHORTCODE +#define fGEN_TCG_V6_vL32b_cur_pi(SHORTCODE) SHORTCODE +#define fGEN_TCG_V6_vL32b_tmp_pi(SHORTCODE) SHORTCODE +#define fGEN_TCG_V6_vL32b_nt_pi(SHORTCODE) SHORTCODE +#define fGEN_TCG_V6_vL32b_nt_cur_pi(SHORTCODE) SHORTCODE +#define fGEN_TCG_V6_vL32b_nt_tmp_pi(SHORTCODE) SHORTCODE +#define fGEN_TCG_V6_vL32b_ai(SHORTCODE) SHORTCODE 
+#define fGEN_TCG_V6_vL32Ub_ai(SHORTCODE) SHORTCODE +#define fGEN_TCG_V6_vL32b_cur_ai(SHORTCODE) SHORTCODE +#define fGEN_TCG_V6_vL32b_tmp_ai(SHORTCODE) SHORTCODE +#define fGEN_TCG_V6_vL32b_nt_ai(SHORTCODE) SHORTCODE +#define fGEN_TCG_V6_vL32b_nt_cur_ai(SHORTCODE) SHORTCODE +#define fGEN_TCG_V6_vL32b_nt_tmp_ai(SHORTCODE) SHORTCODE +#define fGEN_TCG_V6_vL32b_ppu(SHORTCODE) SHORTCODE +#define fGEN_TCG_V6_vL32Ub_ppu(SHORTCODE) SHORTCODE +#define fGEN_TCG_V6_vL32b_cur_ppu(SHORTCODE) SHORTCODE +#define fGEN_TCG_V6_vL32b_tmp_ppu(SHORTCODE) SHORTCODE +#define fGEN_TCG_V6_vL32b_nt_ppu(SHORTCODE) SHORTCODE +#define fGEN_TCG_V6_vL32b_nt_cur_ppu(SHORTCODE) SHORTCODE +#define fGEN_TCG_V6_vL32b_nt_tmp_ppu(SHORTCODE) SHORTCODE + +/* Predicated vector loads */ +#define fGEN_TCG_PRED_VEC_LOAD(GET_EA, PRED, DSTOFF, INC) \ + do { \ + TCGv LSB = tcg_temp_new(); \ + TCGLabel *false_label = gen_new_label(); \ + TCGLabel *end_label = gen_new_label(); \ + GET_EA; \ + PRED; \ + tcg_gen_brcondi_tl(TCG_COND_EQ, LSB, 0, false_label); \ + tcg_temp_free(LSB); \ + gen_vreg_load(ctx, DSTOFF, EA, true); \ + INC; \ + tcg_gen_br(end_label); \ + gen_set_label(false_label); \ + tcg_gen_ori_tl(hex_slot_cancelled, hex_slot_cancelled, \ + 1 << insn->slot); \ + gen_set_label(end_label); \ + } while (0) + +#define fGEN_TCG_PRED_VEC_LOAD_pred_pi \ + fGEN_TCG_PRED_VEC_LOAD(fLSBOLD(PvV), \ + fEA_REG(RxV), \ + VdV_off, \ + fPM_I(RxV, siV * sizeof(MMVector))) +#define fGEN_TCG_PRED_VEC_LOAD_npred_pi \ + fGEN_TCG_PRED_VEC_LOAD(fLSBOLDNOT(PvV), \ + fEA_REG(RxV), \ + VdV_off, \ + fPM_I(RxV, siV * sizeof(MMVector))) + +#define fGEN_TCG_V6_vL32b_pred_pi(SHORTCODE) \ + fGEN_TCG_PRED_VEC_LOAD_pred_pi +#define fGEN_TCG_V6_vL32b_npred_pi(SHORTCODE) \ + fGEN_TCG_PRED_VEC_LOAD_npred_pi +#define fGEN_TCG_V6_vL32b_cur_pred_pi(SHORTCODE) \ + fGEN_TCG_PRED_VEC_LOAD_pred_pi +#define fGEN_TCG_V6_vL32b_cur_npred_pi(SHORTCODE) \ + fGEN_TCG_PRED_VEC_LOAD_npred_pi +#define fGEN_TCG_V6_vL32b_tmp_pred_pi(SHORTCODE) \ + fGEN_TCG_PRED_VEC_LOAD_pred_pi +#define fGEN_TCG_V6_vL32b_tmp_npred_pi(SHORTCODE) \ + fGEN_TCG_PRED_VEC_LOAD_npred_pi +#define fGEN_TCG_V6_vL32b_nt_pred_pi(SHORTCODE) \ + fGEN_TCG_PRED_VEC_LOAD_pred_pi +#define fGEN_TCG_V6_vL32b_nt_npred_pi(SHORTCODE) \ + fGEN_TCG_PRED_VEC_LOAD_npred_pi +#define fGEN_TCG_V6_vL32b_nt_cur_pred_pi(SHORTCODE) \ + fGEN_TCG_PRED_VEC_LOAD_pred_pi +#define fGEN_TCG_V6_vL32b_nt_cur_npred_pi(SHORTCODE) \ + fGEN_TCG_PRED_VEC_LOAD_npred_pi +#define fGEN_TCG_V6_vL32b_nt_tmp_pred_pi(SHORTCODE) \ + fGEN_TCG_PRED_VEC_LOAD_pred_pi +#define fGEN_TCG_V6_vL32b_nt_tmp_npred_pi(SHORTCODE) \ + fGEN_TCG_PRED_VEC_LOAD_npred_pi + +#define fGEN_TCG_PRED_VEC_LOAD_pred_ai \ + fGEN_TCG_PRED_VEC_LOAD(fLSBOLD(PvV), \ + fEA_RI(RtV, siV * sizeof(MMVector)), \ + VdV_off, \ + do {} while (0)) +#define fGEN_TCG_PRED_VEC_LOAD_npred_ai \ + fGEN_TCG_PRED_VEC_LOAD(fLSBOLDNOT(PvV), \ + fEA_RI(RtV, siV * sizeof(MMVector)), \ + VdV_off, \ + do {} while (0)) + +#define fGEN_TCG_V6_vL32b_pred_ai(SHORTCODE) \ + fGEN_TCG_PRED_VEC_LOAD_pred_ai +#define fGEN_TCG_V6_vL32b_npred_ai(SHORTCODE) \ + fGEN_TCG_PRED_VEC_LOAD_npred_ai +#define fGEN_TCG_V6_vL32b_cur_pred_ai(SHORTCODE) \ + fGEN_TCG_PRED_VEC_LOAD_pred_ai +#define fGEN_TCG_V6_vL32b_cur_npred_ai(SHORTCODE) \ + fGEN_TCG_PRED_VEC_LOAD_npred_ai +#define fGEN_TCG_V6_vL32b_tmp_pred_ai(SHORTCODE) \ + fGEN_TCG_PRED_VEC_LOAD_pred_ai +#define fGEN_TCG_V6_vL32b_tmp_npred_ai(SHORTCODE) \ + fGEN_TCG_PRED_VEC_LOAD_npred_ai +#define fGEN_TCG_V6_vL32b_nt_pred_ai(SHORTCODE) \ + fGEN_TCG_PRED_VEC_LOAD_pred_ai +#define 
fGEN_TCG_V6_vL32b_nt_npred_ai(SHORTCODE) \ + fGEN_TCG_PRED_VEC_LOAD_npred_ai +#define fGEN_TCG_V6_vL32b_nt_cur_pred_ai(SHORTCODE) \ + fGEN_TCG_PRED_VEC_LOAD_pred_ai +#define fGEN_TCG_V6_vL32b_nt_cur_npred_ai(SHORTCODE) \ + fGEN_TCG_PRED_VEC_LOAD_npred_ai +#define fGEN_TCG_V6_vL32b_nt_tmp_pred_ai(SHORTCODE) \ + fGEN_TCG_PRED_VEC_LOAD_pred_ai +#define fGEN_TCG_V6_vL32b_nt_tmp_npred_ai(SHORTCODE) \ + fGEN_TCG_PRED_VEC_LOAD_npred_ai + +#define fGEN_TCG_PRED_VEC_LOAD_pred_ppu \ + fGEN_TCG_PRED_VEC_LOAD(fLSBOLD(PvV), \ + fEA_REG(RxV), \ + VdV_off, \ + fPM_M(RxV, MuV)) +#define fGEN_TCG_PRED_VEC_LOAD_npred_ppu \ + fGEN_TCG_PRED_VEC_LOAD(fLSBOLDNOT(PvV), \ + fEA_REG(RxV), \ + VdV_off, \ + fPM_M(RxV, MuV)) + +#define fGEN_TCG_V6_vL32b_pred_ppu(SHORTCODE) \ + fGEN_TCG_PRED_VEC_LOAD_pred_ppu +#define fGEN_TCG_V6_vL32b_npred_ppu(SHORTCODE) \ + fGEN_TCG_PRED_VEC_LOAD_npred_ppu +#define fGEN_TCG_V6_vL32b_cur_pred_ppu(SHORTCODE) \ + fGEN_TCG_PRED_VEC_LOAD_pred_ppu +#define fGEN_TCG_V6_vL32b_cur_npred_ppu(SHORTCODE) \ + fGEN_TCG_PRED_VEC_LOAD_npred_ppu +#define fGEN_TCG_V6_vL32b_tmp_pred_ppu(SHORTCODE) \ + fGEN_TCG_PRED_VEC_LOAD_pred_ppu +#define fGEN_TCG_V6_vL32b_tmp_npred_ppu(SHORTCODE) \ + fGEN_TCG_PRED_VEC_LOAD_npred_ppu +#define fGEN_TCG_V6_vL32b_nt_pred_ppu(SHORTCODE) \ + fGEN_TCG_PRED_VEC_LOAD_pred_ppu +#define fGEN_TCG_V6_vL32b_nt_npred_ppu(SHORTCODE) \ + fGEN_TCG_PRED_VEC_LOAD_npred_ppu +#define fGEN_TCG_V6_vL32b_nt_cur_pred_ppu(SHORTCODE) \ + fGEN_TCG_PRED_VEC_LOAD_pred_ppu +#define fGEN_TCG_V6_vL32b_nt_cur_npred_ppu(SHORTCODE) \ + fGEN_TCG_PRED_VEC_LOAD_npred_ppu +#define fGEN_TCG_V6_vL32b_nt_tmp_pred_ppu(SHORTCODE) \ + fGEN_TCG_PRED_VEC_LOAD_pred_ppu +#define fGEN_TCG_V6_vL32b_nt_tmp_npred_ppu(SHORTCODE) \ + fGEN_TCG_PRED_VEC_LOAD_npred_ppu + +/* Vector stores */ +#define fGEN_TCG_V6_vS32b_pi(SHORTCODE) SHORTCODE +#define fGEN_TCG_V6_vS32Ub_pi(SHORTCODE) SHORTCODE +#define fGEN_TCG_V6_vS32b_nt_pi(SHORTCODE) SHORTCODE +#define fGEN_TCG_V6_vS32b_ai(SHORTCODE) SHORTCODE +#define fGEN_TCG_V6_vS32Ub_ai(SHORTCODE) SHORTCODE +#define fGEN_TCG_V6_vS32b_nt_ai(SHORTCODE) SHORTCODE +#define fGEN_TCG_V6_vS32b_ppu(SHORTCODE) SHORTCODE +#define fGEN_TCG_V6_vS32Ub_ppu(SHORTCODE) SHORTCODE +#define fGEN_TCG_V6_vS32b_nt_ppu(SHORTCODE) SHORTCODE + +/* New value vector stores */ +#define fGEN_TCG_NEWVAL_VEC_STORE(GET_EA, INC) \ + do { \ + GET_EA; \ + gen_vreg_store(ctx, insn, pkt, EA, OsN_off, insn->slot, true); \ + INC; \ + } while (0) + +#define fGEN_TCG_NEWVAL_VEC_STORE_pi \ + fGEN_TCG_NEWVAL_VEC_STORE(fEA_REG(RxV), fPM_I(RxV, siV * sizeof(MMVector))) + +#define fGEN_TCG_V6_vS32b_new_pi(SHORTCODE) \ + fGEN_TCG_NEWVAL_VEC_STORE_pi +#define fGEN_TCG_V6_vS32b_nt_new_pi(SHORTCODE) \ + fGEN_TCG_NEWVAL_VEC_STORE_pi + +#define fGEN_TCG_NEWVAL_VEC_STORE_ai \ + fGEN_TCG_NEWVAL_VEC_STORE(fEA_RI(RtV, siV * sizeof(MMVector)), \ + do { } while (0)) + +#define fGEN_TCG_V6_vS32b_new_ai(SHORTCODE) \ + fGEN_TCG_NEWVAL_VEC_STORE_ai +#define fGEN_TCG_V6_vS32b_nt_new_ai(SHORTCODE) \ + fGEN_TCG_NEWVAL_VEC_STORE_ai + +#define fGEN_TCG_NEWVAL_VEC_STORE_ppu \ + fGEN_TCG_NEWVAL_VEC_STORE(fEA_REG(RxV), fPM_M(RxV, MuV)) + +#define fGEN_TCG_V6_vS32b_new_ppu(SHORTCODE) \ + fGEN_TCG_NEWVAL_VEC_STORE_ppu +#define fGEN_TCG_V6_vS32b_nt_new_ppu(SHORTCODE) \ + fGEN_TCG_NEWVAL_VEC_STORE_ppu + +/* Predicated vector stores */ +#define fGEN_TCG_PRED_VEC_STORE(GET_EA, PRED, SRCOFF, ALIGN, INC) \ + do { \ + TCGv LSB = tcg_temp_new(); \ + TCGLabel *false_label = gen_new_label(); \ + TCGLabel *end_label = gen_new_label(); \ + GET_EA; \ + PRED; \ + 
tcg_gen_brcondi_tl(TCG_COND_EQ, LSB, 0, false_label); \ + tcg_temp_free(LSB); \ + gen_vreg_store(ctx, insn, pkt, EA, SRCOFF, insn->slot, ALIGN); \ + INC; \ + tcg_gen_br(end_label); \ + gen_set_label(false_label); \ + tcg_gen_ori_tl(hex_slot_cancelled, hex_slot_cancelled, \ + 1 << insn->slot); \ + gen_set_label(end_label); \ + } while (0) + +#define fGEN_TCG_PRED_VEC_STORE_pred_pi(ALIGN) \ + fGEN_TCG_PRED_VEC_STORE(fLSBOLD(PvV), \ + fEA_REG(RxV), \ + VsV_off, ALIGN, \ + fPM_I(RxV, siV * sizeof(MMVector))) +#define fGEN_TCG_PRED_VEC_STORE_npred_pi(ALIGN) \ + fGEN_TCG_PRED_VEC_STORE(fLSBOLDNOT(PvV), \ + fEA_REG(RxV), \ + VsV_off, ALIGN, \ + fPM_I(RxV, siV * sizeof(MMVector))) +#define fGEN_TCG_PRED_VEC_STORE_new_pred_pi \ + fGEN_TCG_PRED_VEC_STORE(fLSBOLD(PvV), \ + fEA_REG(RxV), \ + OsN_off, true, \ + fPM_I(RxV, siV * sizeof(MMVector))) +#define fGEN_TCG_PRED_VEC_STORE_new_npred_pi \ + fGEN_TCG_PRED_VEC_STORE(fLSBOLDNOT(PvV), \ + fEA_REG(RxV), \ + OsN_off, true, \ + fPM_I(RxV, siV * sizeof(MMVector))) + +#define fGEN_TCG_V6_vS32b_pred_pi(SHORTCODE) \ + fGEN_TCG_PRED_VEC_STORE_pred_pi(true) +#define fGEN_TCG_V6_vS32b_npred_pi(SHORTCODE) \ + fGEN_TCG_PRED_VEC_STORE_npred_pi(true) +#define fGEN_TCG_V6_vS32Ub_pred_pi(SHORTCODE) \ + fGEN_TCG_PRED_VEC_STORE_pred_pi(false) +#define fGEN_TCG_V6_vS32Ub_npred_pi(SHORTCODE) \ + fGEN_TCG_PRED_VEC_STORE_npred_pi(false) +#define fGEN_TCG_V6_vS32b_nt_pred_pi(SHORTCODE) \ + fGEN_TCG_PRED_VEC_STORE_pred_pi(true) +#define fGEN_TCG_V6_vS32b_nt_npred_pi(SHORTCODE) \ + fGEN_TCG_PRED_VEC_STORE_npred_pi(true) +#define fGEN_TCG_V6_vS32b_new_pred_pi(SHORTCODE) \ + fGEN_TCG_PRED_VEC_STORE_new_pred_pi +#define fGEN_TCG_V6_vS32b_new_npred_pi(SHORTCODE) \ + fGEN_TCG_PRED_VEC_STORE_new_npred_pi +#define fGEN_TCG_V6_vS32b_nt_new_pred_pi(SHORTCODE) \ + fGEN_TCG_PRED_VEC_STORE_new_pred_pi +#define fGEN_TCG_V6_vS32b_nt_new_npred_pi(SHORTCODE) \ + fGEN_TCG_PRED_VEC_STORE_new_npred_pi + +#define fGEN_TCG_PRED_VEC_STORE_pred_ai(ALIGN) \ + fGEN_TCG_PRED_VEC_STORE(fLSBOLD(PvV), \ + fEA_RI(RtV, siV * sizeof(MMVector)), \ + VsV_off, ALIGN, \ + do { } while (0)) +#define fGEN_TCG_PRED_VEC_STORE_npred_ai(ALIGN) \ + fGEN_TCG_PRED_VEC_STORE(fLSBOLDNOT(PvV), \ + fEA_RI(RtV, siV * sizeof(MMVector)), \ + VsV_off, ALIGN, \ + do { } while (0)) +#define fGEN_TCG_PRED_VEC_STORE_new_pred_ai \ + fGEN_TCG_PRED_VEC_STORE(fLSBOLD(PvV), \ + fEA_RI(RtV, siV * sizeof(MMVector)), \ + OsN_off, true, \ + do { } while (0)) +#define fGEN_TCG_PRED_VEC_STORE_new_npred_ai \ + fGEN_TCG_PRED_VEC_STORE(fLSBOLDNOT(PvV), \ + fEA_RI(RtV, siV * sizeof(MMVector)), \ + OsN_off, true, \ + do { } while (0)) + +#define fGEN_TCG_V6_vS32b_pred_ai(SHORTCODE) \ + fGEN_TCG_PRED_VEC_STORE_pred_ai(true) +#define fGEN_TCG_V6_vS32b_npred_ai(SHORTCODE) \ + fGEN_TCG_PRED_VEC_STORE_npred_ai(true) +#define fGEN_TCG_V6_vS32Ub_pred_ai(SHORTCODE) \ + fGEN_TCG_PRED_VEC_STORE_pred_ai(false) +#define fGEN_TCG_V6_vS32Ub_npred_ai(SHORTCODE) \ + fGEN_TCG_PRED_VEC_STORE_npred_ai(false) +#define fGEN_TCG_V6_vS32b_nt_pred_ai(SHORTCODE) \ + fGEN_TCG_PRED_VEC_STORE_pred_ai(true) +#define fGEN_TCG_V6_vS32b_nt_npred_ai(SHORTCODE) \ + fGEN_TCG_PRED_VEC_STORE_npred_ai(true) +#define fGEN_TCG_V6_vS32b_new_pred_ai(SHORTCODE) \ + fGEN_TCG_PRED_VEC_STORE_new_pred_ai +#define fGEN_TCG_V6_vS32b_new_npred_ai(SHORTCODE) \ + fGEN_TCG_PRED_VEC_STORE_new_npred_ai +#define fGEN_TCG_V6_vS32b_nt_new_pred_ai(SHORTCODE) \ + fGEN_TCG_PRED_VEC_STORE_new_pred_ai +#define fGEN_TCG_V6_vS32b_nt_new_npred_ai(SHORTCODE) \ + fGEN_TCG_PRED_VEC_STORE_new_npred_ai + +#define 
fGEN_TCG_PRED_VEC_STORE_pred_ppu(ALIGN) \ + fGEN_TCG_PRED_VEC_STORE(fLSBOLD(PvV), \ + fEA_REG(RxV), \ + VsV_off, ALIGN, \ + fPM_M(RxV, MuV)) +#define fGEN_TCG_PRED_VEC_STORE_npred_ppu(ALIGN) \ + fGEN_TCG_PRED_VEC_STORE(fLSBOLDNOT(PvV), \ + fEA_REG(RxV), \ + VsV_off, ALIGN, \ + fPM_M(RxV, MuV)) +#define fGEN_TCG_PRED_VEC_STORE_new_pred_ppu \ + fGEN_TCG_PRED_VEC_STORE(fLSBOLD(PvV), \ + fEA_REG(RxV), \ + OsN_off, true, \ + fPM_M(RxV, MuV)) +#define fGEN_TCG_PRED_VEC_STORE_new_npred_ppu \ + fGEN_TCG_PRED_VEC_STORE(fLSBOLDNOT(PvV), \ + fEA_REG(RxV), \ + OsN_off, true, \ + fPM_M(RxV, MuV)) + +#define fGEN_TCG_V6_vS32b_pred_ppu(SHORTCODE) \ + fGEN_TCG_PRED_VEC_STORE_pred_ppu(true) +#define fGEN_TCG_V6_vS32b_npred_ppu(SHORTCODE) \ + fGEN_TCG_PRED_VEC_STORE_npred_ppu(true) +#define fGEN_TCG_V6_vS32Ub_pred_ppu(SHORTCODE) \ + fGEN_TCG_PRED_VEC_STORE_pred_ppu(false) +#define fGEN_TCG_V6_vS32Ub_npred_ppu(SHORTCODE) \ + fGEN_TCG_PRED_VEC_STORE_npred_ppu(false) +#define fGEN_TCG_V6_vS32b_nt_pred_ppu(SHORTCODE) \ + fGEN_TCG_PRED_VEC_STORE_pred_ppu(true) +#define fGEN_TCG_V6_vS32b_nt_npred_ppu(SHORTCODE) \ + fGEN_TCG_PRED_VEC_STORE_npred_ppu(true) +#define fGEN_TCG_V6_vS32b_new_pred_ppu(SHORTCODE) \ + fGEN_TCG_PRED_VEC_STORE_new_pred_ppu +#define fGEN_TCG_V6_vS32b_new_npred_ppu(SHORTCODE) \ + fGEN_TCG_PRED_VEC_STORE_new_npred_ppu +#define fGEN_TCG_V6_vS32b_nt_new_pred_ppu(SHORTCODE) \ + fGEN_TCG_PRED_VEC_STORE_new_pred_ppu +#define fGEN_TCG_V6_vS32b_nt_new_npred_ppu(SHORTCODE) \ + fGEN_TCG_PRED_VEC_STORE_new_npred_ppu + +/* Masked vector stores */ +#define fGEN_TCG_V6_vS32b_qpred_pi(SHORTCODE) SHORTCODE +#define fGEN_TCG_V6_vS32b_nt_qpred_pi(SHORTCODE) SHORTCODE +#define fGEN_TCG_V6_vS32b_qpred_ai(SHORTCODE) SHORTCODE +#define fGEN_TCG_V6_vS32b_nt_qpred_ai(SHORTCODE) SHORTCODE +#define fGEN_TCG_V6_vS32b_qpred_ppu(SHORTCODE) SHORTCODE +#define fGEN_TCG_V6_vS32b_nt_qpred_ppu(SHORTCODE) SHORTCODE +#define fGEN_TCG_V6_vS32b_nqpred_pi(SHORTCODE) SHORTCODE +#define fGEN_TCG_V6_vS32b_nt_nqpred_pi(SHORTCODE) SHORTCODE +#define fGEN_TCG_V6_vS32b_nqpred_ai(SHORTCODE) SHORTCODE +#define fGEN_TCG_V6_vS32b_nt_nqpred_ai(SHORTCODE) SHORTCODE +#define fGEN_TCG_V6_vS32b_nqpred_ppu(SHORTCODE) SHORTCODE +#define fGEN_TCG_V6_vS32b_nt_nqpred_ppu(SHORTCODE) SHORTCODE + +/* Store release not modelled in qemu, but need to suppress compiler warnings */ +#define fGEN_TCG_V6_vS32b_srls_pi(SHORTCODE) \ + do { \ + siV = siV; \ + } while (0) +#define fGEN_TCG_V6_vS32b_srls_ai(SHORTCODE) \ + do { \ + RtV = RtV; \ + siV = siV; \ + } while (0) +#define fGEN_TCG_V6_vS32b_srls_ppu(SHORTCODE) \ + do { \ + MuV = MuV; \ + } while (0) + +#endif diff --git a/target/hexagon/genptr.c b/target/hexagon/genptr.c index 7333299615..4419d30e23 100644 --- a/target/hexagon/genptr.c +++ b/target/hexagon/genptr.c @@ -19,17 +19,20 @@ #include "cpu.h" #include "internal.h" #include "tcg/tcg-op.h" +#include "tcg/tcg-op-gvec.h" #include "insn.h" #include "opcodes.h" #include "translate.h" #define QEMU_GENERATE /* Used internally by macros.h */ #include "macros.h" +#include "mmvec/macros.h" #undef QEMU_GENERATE #include "gen_tcg.h" +#include "gen_tcg_hvx.h" static inline void gen_log_predicated_reg_write(int rnum, TCGv val, int slot) { - TCGv zero = tcg_const_tl(0); + TCGv zero = tcg_constant_tl(0); TCGv slot_mask = tcg_temp_new(); tcg_gen_andi_tl(slot_mask, hex_slot_cancelled, 1 << slot); @@ -47,7 +50,6 @@ static inline void gen_log_predicated_reg_write(int rnum, TCGv val, int slot) tcg_gen_or_tl(hex_reg_written[rnum], hex_reg_written[rnum], slot_mask); } - 
tcg_temp_free(zero); tcg_temp_free(slot_mask); } @@ -63,7 +65,7 @@ static inline void gen_log_reg_write(int rnum, TCGv val) static void gen_log_predicated_reg_write_pair(int rnum, TCGv_i64 val, int slot) { TCGv val32 = tcg_temp_new(); - TCGv zero = tcg_const_tl(0); + TCGv zero = tcg_constant_tl(0); TCGv slot_mask = tcg_temp_new(); tcg_gen_andi_tl(slot_mask, hex_slot_cancelled, 1 << slot); @@ -92,7 +94,6 @@ static void gen_log_predicated_reg_write_pair(int rnum, TCGv_i64 val, int slot) } tcg_temp_free(val32); - tcg_temp_free(zero); tcg_temp_free(slot_mask); } @@ -167,6 +168,9 @@ static inline void gen_read_ctrl_reg(DisasContext *ctx, const int reg_num, } else if (reg_num == HEX_REG_QEMU_INSN_CNT) { tcg_gen_addi_tl(dest, hex_gpr[HEX_REG_QEMU_INSN_CNT], ctx->num_insns); + } else if (reg_num == HEX_REG_QEMU_HVX_CNT) { + tcg_gen_addi_tl(dest, hex_gpr[HEX_REG_QEMU_HVX_CNT], + ctx->num_hvx_insns); } else { tcg_gen_mov_tl(dest, hex_gpr[reg_num]); } @@ -181,9 +185,8 @@ static inline void gen_read_ctrl_reg_pair(DisasContext *ctx, const int reg_num, tcg_gen_concat_i32_i64(dest, p3_0, hex_gpr[reg_num + 1]); tcg_temp_free(p3_0); } else if (reg_num == HEX_REG_PC - 1) { - TCGv pc = tcg_const_tl(ctx->base.pc_next); + TCGv pc = tcg_constant_tl(ctx->base.pc_next); tcg_gen_concat_i32_i64(dest, hex_gpr[reg_num], pc); - tcg_temp_free(pc); } else if (reg_num == HEX_REG_QEMU_PKT_CNT) { TCGv pkt_cnt = tcg_temp_new(); TCGv insn_cnt = tcg_temp_new(); @@ -194,6 +197,12 @@ static inline void gen_read_ctrl_reg_pair(DisasContext *ctx, const int reg_num, tcg_gen_concat_i32_i64(dest, pkt_cnt, insn_cnt); tcg_temp_free(pkt_cnt); tcg_temp_free(insn_cnt); + } else if (reg_num == HEX_REG_QEMU_HVX_CNT) { + TCGv hvx_cnt = tcg_temp_new(); + tcg_gen_addi_tl(hvx_cnt, hex_gpr[HEX_REG_QEMU_HVX_CNT], + ctx->num_hvx_insns); + tcg_gen_concat_i32_i64(dest, hvx_cnt, hex_gpr[reg_num + 1]); + tcg_temp_free(hvx_cnt); } else { tcg_gen_concat_i32_i64(dest, hex_gpr[reg_num], @@ -229,6 +238,9 @@ static inline void gen_write_ctrl_reg(DisasContext *ctx, int reg_num, if (reg_num == HEX_REG_QEMU_INSN_CNT) { ctx->num_insns = 0; } + if (reg_num == HEX_REG_QEMU_HVX_CNT) { + ctx->num_hvx_insns = 0; + } } } @@ -250,6 +262,9 @@ static inline void gen_write_ctrl_reg_pair(DisasContext *ctx, int reg_num, ctx->num_packets = 0; ctx->num_insns = 0; } + if (reg_num == HEX_REG_QEMU_HVX_CNT) { + ctx->num_hvx_insns = 0; + } } } @@ -331,15 +346,13 @@ static inline void gen_store_conditional4(DisasContext *ctx, tcg_gen_brcond_tl(TCG_COND_NE, vaddr, hex_llsc_addr, fail); - one = tcg_const_tl(0xff); - zero = tcg_const_tl(0); + one = tcg_constant_tl(0xff); + zero = tcg_constant_tl(0); tmp = tcg_temp_new(); tcg_gen_atomic_cmpxchg_tl(tmp, hex_llsc_addr, hex_llsc_val, src, ctx->mem_idx, MO_32); tcg_gen_movcond_tl(TCG_COND_EQ, pred, tmp, hex_llsc_val, one, zero); - tcg_temp_free(one); - tcg_temp_free(zero); tcg_temp_free(tmp); tcg_gen_br(done); @@ -359,16 +372,14 @@ static inline void gen_store_conditional8(DisasContext *ctx, tcg_gen_brcond_tl(TCG_COND_NE, vaddr, hex_llsc_addr, fail); - one = tcg_const_i64(0xff); - zero = tcg_const_i64(0); + one = tcg_constant_i64(0xff); + zero = tcg_constant_i64(0); tmp = tcg_temp_new_i64(); tcg_gen_atomic_cmpxchg_i64(tmp, hex_llsc_addr, hex_llsc_val_i64, src, ctx->mem_idx, MO_64); tcg_gen_movcond_i64(TCG_COND_EQ, tmp, tmp, hex_llsc_val_i64, one, zero); tcg_gen_extrl_i64_i32(pred, tmp); - tcg_temp_free_i64(one); - tcg_temp_free_i64(zero); tcg_temp_free_i64(tmp); tcg_gen_br(done); @@ -396,9 +407,8 @@ static inline void gen_store1(TCGv_env 
cpu_env, TCGv vaddr, TCGv src, static inline void gen_store1i(TCGv_env cpu_env, TCGv vaddr, int32_t src, DisasContext *ctx, int slot) { - TCGv tmp = tcg_const_tl(src); + TCGv tmp = tcg_constant_tl(src); gen_store1(cpu_env, vaddr, tmp, ctx, slot); - tcg_temp_free(tmp); } static inline void gen_store2(TCGv_env cpu_env, TCGv vaddr, TCGv src, @@ -411,9 +421,8 @@ static inline void gen_store2(TCGv_env cpu_env, TCGv vaddr, TCGv src, static inline void gen_store2i(TCGv_env cpu_env, TCGv vaddr, int32_t src, DisasContext *ctx, int slot) { - TCGv tmp = tcg_const_tl(src); + TCGv tmp = tcg_constant_tl(src); gen_store2(cpu_env, vaddr, tmp, ctx, slot); - tcg_temp_free(tmp); } static inline void gen_store4(TCGv_env cpu_env, TCGv vaddr, TCGv src, @@ -426,9 +435,8 @@ static inline void gen_store4(TCGv_env cpu_env, TCGv vaddr, TCGv src, static inline void gen_store4i(TCGv_env cpu_env, TCGv vaddr, int32_t src, DisasContext *ctx, int slot) { - TCGv tmp = tcg_const_tl(src); + TCGv tmp = tcg_constant_tl(src); gen_store4(cpu_env, vaddr, tmp, ctx, slot); - tcg_temp_free(tmp); } static inline void gen_store8(TCGv_env cpu_env, TCGv vaddr, TCGv_i64 src, @@ -443,21 +451,188 @@ static inline void gen_store8(TCGv_env cpu_env, TCGv vaddr, TCGv_i64 src, static inline void gen_store8i(TCGv_env cpu_env, TCGv vaddr, int64_t src, DisasContext *ctx, int slot) { - TCGv_i64 tmp = tcg_const_i64(src); + TCGv_i64 tmp = tcg_constant_i64(src); gen_store8(cpu_env, vaddr, tmp, ctx, slot); - tcg_temp_free_i64(tmp); } static TCGv gen_8bitsof(TCGv result, TCGv value) { - TCGv zero = tcg_const_tl(0); - TCGv ones = tcg_const_tl(0xff); + TCGv zero = tcg_constant_tl(0); + TCGv ones = tcg_constant_tl(0xff); tcg_gen_movcond_tl(TCG_COND_NE, result, value, zero, ones, zero); - tcg_temp_free(zero); - tcg_temp_free(ones); return result; } +static intptr_t vreg_src_off(DisasContext *ctx, int num) +{ + intptr_t offset = offsetof(CPUHexagonState, VRegs[num]); + + if (test_bit(num, ctx->vregs_select)) { + offset = ctx_future_vreg_off(ctx, num, 1, false); + } + if (test_bit(num, ctx->vregs_updated_tmp)) { + offset = ctx_tmp_vreg_off(ctx, num, 1, false); + } + return offset; +} + +static void gen_log_vreg_write(DisasContext *ctx, intptr_t srcoff, int num, + VRegWriteType type, int slot_num, + bool is_predicated) +{ + TCGLabel *label_end = NULL; + intptr_t dstoff; + + if (is_predicated) { + TCGv cancelled = tcg_temp_local_new(); + label_end = gen_new_label(); + + /* Don't do anything if the slot was cancelled */ + tcg_gen_extract_tl(cancelled, hex_slot_cancelled, slot_num, 1); + tcg_gen_brcondi_tl(TCG_COND_NE, cancelled, 0, label_end); + tcg_temp_free(cancelled); + } + + if (type != EXT_TMP) { + dstoff = ctx_future_vreg_off(ctx, num, 1, true); + tcg_gen_gvec_mov(MO_64, dstoff, srcoff, + sizeof(MMVector), sizeof(MMVector)); + tcg_gen_ori_tl(hex_VRegs_updated, hex_VRegs_updated, 1 << num); + } else { + dstoff = ctx_tmp_vreg_off(ctx, num, 1, false); + tcg_gen_gvec_mov(MO_64, dstoff, srcoff, + sizeof(MMVector), sizeof(MMVector)); + } + + if (is_predicated) { + gen_set_label(label_end); + } +} + +static void gen_log_vreg_write_pair(DisasContext *ctx, intptr_t srcoff, int num, + VRegWriteType type, int slot_num, + bool is_predicated) +{ + gen_log_vreg_write(ctx, srcoff, num ^ 0, type, slot_num, is_predicated); + srcoff += sizeof(MMVector); + gen_log_vreg_write(ctx, srcoff, num ^ 1, type, slot_num, is_predicated); +} + +static void gen_log_qreg_write(intptr_t srcoff, int num, int vnew, + int slot_num, bool is_predicated) +{ + TCGLabel *label_end = NULL; + 
intptr_t dstoff; + + if (is_predicated) { + TCGv cancelled = tcg_temp_local_new(); + label_end = gen_new_label(); + + /* Don't do anything if the slot was cancelled */ + tcg_gen_extract_tl(cancelled, hex_slot_cancelled, slot_num, 1); + tcg_gen_brcondi_tl(TCG_COND_NE, cancelled, 0, label_end); + tcg_temp_free(cancelled); + } + + dstoff = offsetof(CPUHexagonState, future_QRegs[num]); + tcg_gen_gvec_mov(MO_64, dstoff, srcoff, sizeof(MMQReg), sizeof(MMQReg)); + + if (is_predicated) { + tcg_gen_ori_tl(hex_QRegs_updated, hex_QRegs_updated, 1 << num); + gen_set_label(label_end); + } +} + +static void gen_vreg_load(DisasContext *ctx, intptr_t dstoff, TCGv src, + bool aligned) +{ + TCGv_i64 tmp = tcg_temp_new_i64(); + if (aligned) { + tcg_gen_andi_tl(src, src, ~((int32_t)sizeof(MMVector) - 1)); + } + for (int i = 0; i < sizeof(MMVector) / 8; i++) { + tcg_gen_qemu_ld64(tmp, src, ctx->mem_idx); + tcg_gen_addi_tl(src, src, 8); + tcg_gen_st_i64(tmp, cpu_env, dstoff + i * 8); + } + tcg_temp_free_i64(tmp); +} + +static void gen_vreg_store(DisasContext *ctx, Insn *insn, Packet *pkt, + TCGv EA, intptr_t srcoff, int slot, bool aligned) +{ + intptr_t dstoff = offsetof(CPUHexagonState, vstore[slot].data); + intptr_t maskoff = offsetof(CPUHexagonState, vstore[slot].mask); + + if (is_gather_store_insn(insn, pkt)) { + TCGv sl = tcg_constant_tl(slot); + gen_helper_gather_store(cpu_env, EA, sl); + return; + } + + tcg_gen_movi_tl(hex_vstore_pending[slot], 1); + if (aligned) { + tcg_gen_andi_tl(hex_vstore_addr[slot], EA, + ~((int32_t)sizeof(MMVector) - 1)); + } else { + tcg_gen_mov_tl(hex_vstore_addr[slot], EA); + } + tcg_gen_movi_tl(hex_vstore_size[slot], sizeof(MMVector)); + + /* Copy the data to the vstore buffer */ + tcg_gen_gvec_mov(MO_64, dstoff, srcoff, sizeof(MMVector), sizeof(MMVector)); + /* Set the mask to all 1's */ + tcg_gen_gvec_dup_imm(MO_64, maskoff, sizeof(MMQReg), sizeof(MMQReg), ~0LL); +} + +static void gen_vreg_masked_store(DisasContext *ctx, TCGv EA, intptr_t srcoff, + intptr_t bitsoff, int slot, bool invert) +{ + intptr_t dstoff = offsetof(CPUHexagonState, vstore[slot].data); + intptr_t maskoff = offsetof(CPUHexagonState, vstore[slot].mask); + + tcg_gen_movi_tl(hex_vstore_pending[slot], 1); + tcg_gen_andi_tl(hex_vstore_addr[slot], EA, + ~((int32_t)sizeof(MMVector) - 1)); + tcg_gen_movi_tl(hex_vstore_size[slot], sizeof(MMVector)); + + /* Copy the data to the vstore buffer */ + tcg_gen_gvec_mov(MO_64, dstoff, srcoff, sizeof(MMVector), sizeof(MMVector)); + /* Copy the mask */ + tcg_gen_gvec_mov(MO_64, maskoff, bitsoff, sizeof(MMQReg), sizeof(MMQReg)); + if (invert) { + tcg_gen_gvec_not(MO_64, maskoff, maskoff, + sizeof(MMQReg), sizeof(MMQReg)); + } +} + +static void vec_to_qvec(size_t size, intptr_t dstoff, intptr_t srcoff) +{ + TCGv_i64 tmp = tcg_temp_new_i64(); + TCGv_i64 word = tcg_temp_new_i64(); + TCGv_i64 bits = tcg_temp_new_i64(); + TCGv_i64 mask = tcg_temp_new_i64(); + TCGv_i64 zero = tcg_constant_i64(0); + TCGv_i64 ones = tcg_constant_i64(~0); + + for (int i = 0; i < sizeof(MMVector) / 8; i++) { + tcg_gen_ld_i64(tmp, cpu_env, srcoff + i * 8); + tcg_gen_movi_i64(mask, 0); + + for (int j = 0; j < 8; j += size) { + tcg_gen_extract_i64(word, tmp, j * 8, size * 8); + tcg_gen_movcond_i64(TCG_COND_NE, bits, word, zero, ones, zero); + tcg_gen_deposit_i64(mask, mask, bits, j, size); + } + + tcg_gen_st8_i64(mask, cpu_env, dstoff + i); + } + tcg_temp_free_i64(tmp); + tcg_temp_free_i64(word); + tcg_temp_free_i64(bits); + tcg_temp_free_i64(mask); +} + #include "tcg_funcs_generated.c.inc" #include 
"tcg_func_table_generated.c.inc" diff --git a/target/hexagon/helper.h b/target/hexagon/helper.h index ca201fb680..c89aa4ed4d 100644 --- a/target/hexagon/helper.h +++ b/target/hexagon/helper.h @@ -23,6 +23,8 @@ DEF_HELPER_1(debug_start_packet, void, env) DEF_HELPER_FLAGS_3(debug_check_store_width, TCG_CALL_NO_WG, void, env, int, int) DEF_HELPER_FLAGS_3(debug_commit_end, TCG_CALL_NO_WG, void, env, int, int) DEF_HELPER_2(commit_store, void, env, int) +DEF_HELPER_3(gather_store, void, env, i32, int) +DEF_HELPER_1(commit_hvx_stores, void, env) DEF_HELPER_FLAGS_4(fcircadd, TCG_CALL_NO_RWG_SE, s32, s32, s32, s32, s32) DEF_HELPER_FLAGS_1(fbrev, TCG_CALL_NO_RWG_SE, i32, i32) DEF_HELPER_3(sfrecipa, i64, env, f32, f32) @@ -89,3 +91,19 @@ DEF_HELPER_4(sffms_lib, f32, env, f32, f32, f32) DEF_HELPER_3(dfmpyfix, f64, env, f64, f64) DEF_HELPER_4(dfmpyhh, f64, env, f64, f64, f64) + +/* Histogram instructions */ +DEF_HELPER_1(vhist, void, env) +DEF_HELPER_1(vhistq, void, env) +DEF_HELPER_1(vwhist256, void, env) +DEF_HELPER_1(vwhist256q, void, env) +DEF_HELPER_1(vwhist256_sat, void, env) +DEF_HELPER_1(vwhist256q_sat, void, env) +DEF_HELPER_1(vwhist128, void, env) +DEF_HELPER_1(vwhist128q, void, env) +DEF_HELPER_2(vwhist128m, void, env, s32) +DEF_HELPER_2(vwhist128qm, void, env, s32) + +DEF_HELPER_2(probe_pkt_scalar_store_s0, void, env, int) +DEF_HELPER_2(probe_hvx_stores, void, env, int) +DEF_HELPER_3(probe_pkt_scalar_hvx_stores, void, env, int, int) diff --git a/target/hexagon/hex_arch_types.h b/target/hexagon/hex_arch_types.h index d721e1f934..78ad607f53 100644 --- a/target/hexagon/hex_arch_types.h +++ b/target/hexagon/hex_arch_types.h @@ -19,6 +19,7 @@ #define HEXAGON_ARCH_TYPES_H #include "qemu/osdep.h" +#include "mmvec/mmvec.h" #include "qemu/int128.h" /* @@ -35,4 +36,8 @@ typedef uint64_t size8u_t; typedef int64_t size8s_t; typedef Int128 size16s_t; +typedef MMVector mmvector_t; +typedef MMVectorPair mmvector_pair_t; +typedef MMQReg mmqret_t; + #endif diff --git a/target/hexagon/hex_common.py b/target/hexagon/hex_common.py index b3b534057d..c81aca8d2a 100755 --- a/target/hexagon/hex_common.py +++ b/target/hexagon/hex_common.py @@ -73,6 +73,8 @@ def calculate_attribs(): add_qemu_macro_attrib('fWRITE_P1', 'A_WRITES_PRED_REG') add_qemu_macro_attrib('fWRITE_P2', 'A_WRITES_PRED_REG') add_qemu_macro_attrib('fWRITE_P3', 'A_WRITES_PRED_REG') + add_qemu_macro_attrib('fSET_OVERFLOW', 'A_IMPLICIT_WRITES_USR') + add_qemu_macro_attrib('fSET_LPCFG', 'A_IMPLICIT_WRITES_USR') # Recurse down macros, find attributes from sub-macros macroValues = list(macros.values()) @@ -143,6 +145,9 @@ def compute_tag_immediates(tag): ## P predicate register ## R GPR register ## M modifier register +## Q HVX predicate vector +## V HVX vector register +## O HVX new vector register ## regid can be one of the following ## d, e destination register ## dd destination register pair @@ -178,6 +183,9 @@ def is_readwrite(regid): def is_scalar_reg(regtype): return regtype in "RPC" +def is_hvx_reg(regtype): + return regtype in "VQ" + def is_old_val(regtype, regid, tag): return regtype+regid+'V' in semdict[tag] @@ -201,6 +209,13 @@ def need_ea(tag): def skip_qemu_helper(tag): return tag in overrides.keys() +def is_tmp_result(tag): + return ('A_CVI_TMP' in attribdict[tag] or + 'A_CVI_TMP_DST' in attribdict[tag]) + +def is_new_result(tag): + return ('A_CVI_NEW' in attribdict[tag]) + def imm_name(immlett): return "%siV" % immlett diff --git a/target/hexagon/hex_regs.h b/target/hexagon/hex_regs.h index f291911eef..e1b3149b07 100644 --- 
a/target/hexagon/hex_regs.h +++ b/target/hexagon/hex_regs.h @@ -76,6 +76,7 @@ enum { /* Use reserved control registers for qemu execution counts */ HEX_REG_QEMU_PKT_CNT = 52, HEX_REG_QEMU_INSN_CNT = 53, + HEX_REG_QEMU_HVX_CNT = 54, HEX_REG_UTIMERLO = 62, HEX_REG_UTIMERHI = 63, }; diff --git a/target/hexagon/imported/allext.idef b/target/hexagon/imported/allext.idef new file mode 100644 index 0000000000..9d4b23e9de --- /dev/null +++ b/target/hexagon/imported/allext.idef @@ -0,0 +1,25 @@ +/* + * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ + +/* + * Top level file for all instruction set extensions + */ +#define EXTNAME mmvec +#define EXTSTR "mmvec" +#include "mmvec/ext.idef" +#undef EXTNAME +#undef EXTSTR diff --git a/target/hexagon/imported/allext_macros.def b/target/hexagon/imported/allext_macros.def new file mode 100644 index 0000000000..9c91199151 --- /dev/null +++ b/target/hexagon/imported/allext_macros.def @@ -0,0 +1,25 @@ +/* + * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ + +/* + * Top level file for all instruction set extensions + */ +#define EXTNAME mmvec +#define EXTSTR "mmvec" +#include "mmvec/macros.def" +#undef EXTNAME +#undef EXTSTR diff --git a/target/hexagon/imported/allextenc.def b/target/hexagon/imported/allextenc.def new file mode 100644 index 0000000000..39a3e93fad --- /dev/null +++ b/target/hexagon/imported/allextenc.def @@ -0,0 +1,20 @@ +/* + * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . 
+ */ + +#define EXTNAME mmvec +#include "mmvec/encode_ext.def" +#undef EXTNAME diff --git a/target/hexagon/imported/allidefs.def b/target/hexagon/imported/allidefs.def index 2aace29888..ee253b83a9 100644 --- a/target/hexagon/imported/allidefs.def +++ b/target/hexagon/imported/allidefs.def @@ -28,3 +28,4 @@ #include "shift.idef" #include "system.idef" #include "subinsns.idef" +#include "allext.idef" diff --git a/target/hexagon/imported/encode.def b/target/hexagon/imported/encode.def index b9368d1284..e40e7fbffb 100644 --- a/target/hexagon/imported/encode.def +++ b/target/hexagon/imported/encode.def @@ -71,6 +71,7 @@ #include "encode_pp.def" #include "encode_subinsn.def" +#include "allextenc.def" #ifdef __SELF_DEF_FIELD32 #undef __SELF_DEF_FIELD32 diff --git a/target/hexagon/imported/macros.def b/target/hexagon/imported/macros.def index 32ed3bf8fc..e23f91562e 100755 --- a/target/hexagon/imported/macros.def +++ b/target/hexagon/imported/macros.def @@ -176,6 +176,12 @@ DEF_MACRO( (A_DOTNEWVALUE,A_RESTRICT_SLOT0ONLY) ) +DEF_MACRO( + fVSATUVALN, + ({ ((VAL) < 0) ? 0 : ((1LL<<(N))-1);}), + () +) + DEF_MACRO( fSATUVALN, ({fSET_OVERFLOW(); ((VAL) < 0) ? 0 : ((1LL<<(N))-1);}), @@ -188,6 +194,12 @@ DEF_MACRO( () ) +DEF_MACRO( + fVSATVALN, + ({((VAL) < 0) ? (-(1LL<<((N)-1))) : ((1LL<<((N)-1))-1);}), + () +) + DEF_MACRO( fZXTN, /* macro name */ ((VAL) & ((1LL<<(N))-1)), @@ -205,6 +217,11 @@ DEF_MACRO( ((fSXTN(N,64,VAL) == (VAL)) ? (VAL) : fSATVALN(N,VAL)), () ) +DEF_MACRO( + fVSATN, + ((fSXTN(N,64,VAL) == (VAL)) ? (VAL) : fVSATVALN(N,VAL)), + () +) DEF_MACRO( fADDSAT64, @@ -234,6 +251,12 @@ DEF_MACRO( () ) +DEF_MACRO( + fVSATUN, + ((fZXTN(N,64,VAL) == (VAL)) ? (VAL) : fVSATUVALN(N,VAL)), + () +) + DEF_MACRO( fSATUN, ((fZXTN(N,64,VAL) == (VAL)) ? (VAL) : fSATUVALN(N,VAL)), @@ -253,6 +276,19 @@ DEF_MACRO( () ) +DEF_MACRO( + fVSATH, + (fVSATN(16,VAL)), + () +) + +DEF_MACRO( + fVSATUH, + (fVSATUN(16,VAL)), + () +) + + DEF_MACRO( fSATUB, (fSATUN(8,VAL)), @@ -265,6 +301,20 @@ DEF_MACRO( ) +DEF_MACRO( + fVSATUB, + (fVSATUN(8,VAL)), + () +) +DEF_MACRO( + fVSATB, + (fVSATN(8,VAL)), + () +) + + + + /*************************************/ /* immediate extension */ /*************************************/ @@ -556,6 +606,18 @@ DEF_MACRO( /* optional attributes */ ) +DEF_MACRO( + fCAST2_2s, /* macro name */ + ((size2s_t)(A)), + /* optional attributes */ +) + +DEF_MACRO( + fCAST2_2u, /* macro name */ + ((size2u_t)(A)), + /* optional attributes */ +) + DEF_MACRO( fCAST4_4s, /* macro name */ ((size4s_t)(A)), @@ -876,6 +938,11 @@ DEF_MACRO( (((size8s_t)(A))<. 
+ */ + +#define CONCAT(A,B) A##B +#define EXTEXTNAME(X) CONCAT(EXT_,X) +#define DEF_ENC(TAG,STR) DEF_EXT_ENC(TAG,EXTEXTNAME(EXTNAME),STR) + + +#ifndef NO_MMVEC +DEF_ENC(V6_extractw, ICLASS_LD" 001 0 000sssss PP0uuuuu --1ddddd") /* coproc insn, returns Rd */ +#endif + + +#ifndef NO_MMVEC + + + +DEF_CLASS32(ICLASS_NCJ" 1--- -------- PP------ --------",COPROC_VMEM) +DEF_CLASS32(ICLASS_NCJ" 1000 0-0ttttt PPi--iii ---ddddd",BaseOffset_VMEM_Loads) +DEF_CLASS32(ICLASS_NCJ" 1000 1-0ttttt PPivviii ---ddddd",BaseOffset_if_Pv_VMEM_Loads) +DEF_CLASS32(ICLASS_NCJ" 1000 0-1ttttt PPi--iii --------",BaseOffset_VMEM_Stores1) +DEF_CLASS32(ICLASS_NCJ" 1000 1-0ttttt PPi--iii 00------",BaseOffset_VMEM_Stores2) +DEF_CLASS32(ICLASS_NCJ" 1000 1-1ttttt PPivviii --------",BaseOffset_if_Pv_VMEM_Stores) + +DEF_CLASS32(ICLASS_NCJ" 1001 0-0xxxxx PP---iii ---ddddd",PostImm_VMEM_Loads) +DEF_CLASS32(ICLASS_NCJ" 1001 1-0xxxxx PP-vviii ---ddddd",PostImm_if_Pv_VMEM_Loads) +DEF_CLASS32(ICLASS_NCJ" 1001 0-1xxxxx PP---iii --------",PostImm_VMEM_Stores1) +DEF_CLASS32(ICLASS_NCJ" 1001 1-0xxxxx PP---iii 00------",PostImm_VMEM_Stores2) +DEF_CLASS32(ICLASS_NCJ" 1001 1-1xxxxx PP-vviii --------",PostImm_if_Pv_VMEM_Stores) + +DEF_CLASS32(ICLASS_NCJ" 1011 0-0xxxxx PPu----- ---ddddd",PostM_VMEM_Loads) +DEF_CLASS32(ICLASS_NCJ" 1011 1-0xxxxx PPuvv--- ---ddddd",PostM_if_Pv_VMEM_Loads) +DEF_CLASS32(ICLASS_NCJ" 1011 0-1xxxxx PPu----- --------",PostM_VMEM_Stores1) +DEF_CLASS32(ICLASS_NCJ" 1011 1-0xxxxx PPu----- 00------",PostM_VMEM_Stores2) +DEF_CLASS32(ICLASS_NCJ" 1011 1-1xxxxx PPuvv--- --------",PostM_if_Pv_VMEM_Stores) + +DEF_CLASS32(ICLASS_NCJ" 110- 0------- PP------ --------",Z_Load) +DEF_CLASS32(ICLASS_NCJ" 110- 1------- PP------ --------",Z_Load_if_Pv) + +DEF_CLASS32(ICLASS_NCJ" 1111 000ttttt PPu--0-- ---vvvvv",Gather) +DEF_CLASS32(ICLASS_NCJ" 1111 000ttttt PPu--1-- -ssvvvvv",Gather_if_Qs) +DEF_CLASS32(ICLASS_NCJ" 1111 001ttttt PPuvvvvv ---wwwww",Scatter) +DEF_CLASS32(ICLASS_NCJ" 1111 001ttttt PPuvvvvv -----sss",Scatter_New) +DEF_CLASS32(ICLASS_NCJ" 1111 1--ttttt PPuvvvvv -sswwwww",Scatter_if_Qs) + + +DEF_FIELD32(ICLASS_NCJ" 1--- -!------ PP------ --------",NT,"NonTemporal") + + + +DEF_FIELDROW_DESC32( ICLASS_NCJ" 1 000 --- ----- PP i --iii ----- ---","[#0] vmem(Rt+#s4)[:nt]") + +#define LDST_ENC(TAG,MAJ3,MID3,RREG,TINY6,MIN3,VREG) DEF_ENC(TAG, ICLASS_NCJ "1" #MAJ3 #MID3 #RREG "PP" #TINY6 #MIN3 #VREG) + +#define LDST_BO(TAGPRE,MID3,PRED,MIN3,VREG) LDST_ENC(TAGPRE##_ai, 000,MID3,ttttt,i PRED iii,MIN3,VREG) +#define LDST_PI(TAGPRE,MID3,PRED,MIN3,VREG) LDST_ENC(TAGPRE##_pi, 001,MID3,xxxxx,- PRED iii,MIN3,VREG) +#define LDST_PM(TAGPRE,MID3,PRED,MIN3,VREG) LDST_ENC(TAGPRE##_ppu,011,MID3,xxxxx,u PRED ---,MIN3,VREG) + +#define LDST_BASICLD(OP,TAGPRE) \ + OP(TAGPRE, 000,00,000,ddddd) \ + OP(TAGPRE##_nt, 010,00,000,ddddd) \ + OP(TAGPRE##_cur, 000,00,001,ddddd) \ + OP(TAGPRE##_nt_cur, 010,00,001,ddddd) \ + OP(TAGPRE##_tmp, 000,00,010,ddddd) \ + OP(TAGPRE##_nt_tmp, 010,00,010,ddddd) + +#define LDST_BASICST(OP,TAGPRE) \ + OP(TAGPRE, 001,--,000,sssss) \ + OP(TAGPRE##_nt, 011,--,000,sssss) \ + OP(TAGPRE##_new, 001,--,001,-0sss) \ + OP(TAGPRE##_srls, 001,--,001,-1---) \ + OP(TAGPRE##_nt_new, 011,--,001,--sss) \ + + +#define LDST_QPREDST(OP,TAGPRE) \ + OP(TAGPRE##_qpred, 100,vv,000,sssss) \ + OP(TAGPRE##_nt_qpred, 110,vv,000,sssss) \ + OP(TAGPRE##_nqpred, 100,vv,001,sssss) \ + OP(TAGPRE##_nt_nqpred,110,vv,001,sssss) \ + +#define LDST_CONDLD(OP,TAGPRE) \ + OP(TAGPRE##_pred, 100,vv,010,ddddd) \ + OP(TAGPRE##_nt_pred, 110,vv,010,ddddd) \ + 
OP(TAGPRE##_npred, 100,vv,011,ddddd) \ + OP(TAGPRE##_nt_npred, 110,vv,011,ddddd) \ + OP(TAGPRE##_cur_pred, 100,vv,100,ddddd) \ + OP(TAGPRE##_nt_cur_pred, 110,vv,100,ddddd) \ + OP(TAGPRE##_cur_npred, 100,vv,101,ddddd) \ + OP(TAGPRE##_nt_cur_npred, 110,vv,101,ddddd) \ + OP(TAGPRE##_tmp_pred, 100,vv,110,ddddd) \ + OP(TAGPRE##_nt_tmp_pred, 110,vv,110,ddddd) \ + OP(TAGPRE##_tmp_npred, 100,vv,111,ddddd) \ + OP(TAGPRE##_nt_tmp_npred, 110,vv,111,ddddd) \ + +#define LDST_PREDST(OP,TAGPRE,NT,MIN2) \ + OP(TAGPRE##_pred, 1 NT 1,vv,MIN2 0,sssss) \ + OP(TAGPRE##_npred, 1 NT 1,vv,MIN2 1,sssss) + +#define LDST_PREDSTNEW(OP,TAGPRE,NT,MIN2) \ + OP(TAGPRE##_pred, 1 NT 1,vv,MIN2 0,NT 0 sss) \ + OP(TAGPRE##_npred, 1 NT 1,vv,MIN2 1,NT 1 sss) + +// 0.0,vv,0--,sssss: pred st +#define LDST_BASICPREDST(OP,TAGPRE) \ + LDST_PREDST(OP,TAGPRE, 0,00) \ + LDST_PREDST(OP,TAGPRE##_nt, 1,00) \ + LDST_PREDSTNEW(OP,TAGPRE##_new, 0,01) \ + LDST_PREDSTNEW(OP,TAGPRE##_nt_new, 1,01) + + + +LDST_BASICLD(LDST_BO,V6_vL32b) +LDST_CONDLD(LDST_BO,V6_vL32b) +LDST_BASICLD(LDST_PI,V6_vL32b) +LDST_CONDLD(LDST_PI,V6_vL32b) +LDST_BASICLD(LDST_PM,V6_vL32b) +LDST_CONDLD(LDST_PM,V6_vL32b) + +// Loads + +LDST_BO(V6_vL32Ub,000,00,111,ddddd) +//Stores +LDST_BASICST(LDST_BO,V6_vS32b) + + +LDST_BO(V6_vS32Ub,001,--,111,sssss) + + + + +// Byte Enabled Stores +LDST_QPREDST(LDST_BO,V6_vS32b) + +// Scalar Predicated Stores +LDST_BASICPREDST(LDST_BO,V6_vS32b) + + +LDST_PREDST(LDST_BO,V6_vS32Ub,0,11) + + + + +DEF_FIELDROW_DESC32( ICLASS_NCJ" 1 001 --- ----- PP - ----- ddddd ---","[#1] vmem(Rx++#s3)[:nt]") + +// Loads +LDST_PI(V6_vL32Ub,000,00,111,ddddd) + +//Stores +LDST_BASICST(LDST_PI,V6_vS32b) + + + +LDST_PI(V6_vS32Ub,001,--,111,sssss) + + +// Byte Enabled Stores +LDST_QPREDST(LDST_PI,V6_vS32b) + + +// Scalar Predicated Stores +LDST_BASICPREDST(LDST_PI,V6_vS32b) + + +LDST_PREDST(LDST_PI,V6_vS32Ub,0,11) + + + +DEF_FIELDROW_DESC32( ICLASS_NCJ" 1 011 --- ----- PP - ----- ----- ---","[#3] vmem(Rx++#M)[:nt]") + +// Loads +LDST_PM(V6_vL32Ub,000,00,111,ddddd) + +//Stores +LDST_BASICST(LDST_PM,V6_vS32b) + + + +LDST_PM(V6_vS32Ub,001,--,111,sssss) + +// Byte Enabled Stores +LDST_QPREDST(LDST_PM,V6_vS32b) + +// Scalar Predicated Stores +LDST_BASICPREDST(LDST_PM,V6_vS32b) + + +LDST_PREDST(LDST_PM,V6_vS32Ub,0,11) + + + +DEF_ENC(V6_vaddcarrysat, ICLASS_CJ" 1 101 100 vvvvv PP 1 uuuuu 0ss ddddd") // +DEF_ENC(V6_vaddcarryo, ICLASS_CJ" 1 101 101 vvvvv PP 1 uuuuu 0ee ddddd") // +DEF_ENC(V6_vsubcarryo, ICLASS_CJ" 1 101 101 vvvvv PP 1 uuuuu 1ee ddddd") // +DEF_ENC(V6_vsatdw, ICLASS_CJ" 1 101 100 vvvvv PP 1 uuuuu 111 ddddd") // + +DEF_FIELDROW_DESC32( ICLASS_NCJ" 1 111 --- ----- PP - ----- ----- ---","[#6] vgather,vscatter") +DEF_ENC(V6_vgathermw, ICLASS_NCJ" 1 111 000 ttttt PP u --000 --- vvvvv") // vtmp.w=vmem(Rt32,Mu2,Vv32.w).w +DEF_ENC(V6_vgathermh, ICLASS_NCJ" 1 111 000 ttttt PP u --001 --- vvvvv") // vtmp.h=vmem(Rt32,Mu2,Vv32.h).h +DEF_ENC(V6_vgathermhw, ICLASS_NCJ" 1 111 000 ttttt PP u --010 --- vvvvv") // vtmp.h=vmem(Rt32,Mu2,Vvv32.w).h + + +DEF_ENC(V6_vgathermwq, ICLASS_NCJ" 1 111 000 ttttt PP u --100 -ss vvvvv") // if (Qs4) vtmp.w=vmem(Rt32,Mu2,Vv32.w).w +DEF_ENC(V6_vgathermhq, ICLASS_NCJ" 1 111 000 ttttt PP u --101 -ss vvvvv") // if (Qs4) vtmp.h=vmem(Rt32,Mu2,Vv32.h).h +DEF_ENC(V6_vgathermhwq, ICLASS_NCJ" 1 111 000 ttttt PP u --110 -ss vvvvv") // if (Qs4) vtmp.h=vmem(Rt32,Mu2,Vvv32.w).h + + + +DEF_ENC(V6_vscattermw, ICLASS_NCJ" 1 111 001 ttttt PP u vvvvv 000 wwwww") // vmem(Rt32,Mu2,Vv32.w)=Vw32.w +DEF_ENC(V6_vscattermh, ICLASS_NCJ" 1 111 001 ttttt PP u vvvvv 001 
wwwww") // vmem(Rt32,Mu2,Vv32.h)=Vw32.h +DEF_ENC(V6_vscattermhw, ICLASS_NCJ" 1 111 001 ttttt PP u vvvvv 010 wwwww") // vmem(Rt32,Mu2,Vv32.h)=Vw32.h + +DEF_ENC(V6_vscattermw_add, ICLASS_NCJ" 1 111 001 ttttt PP u vvvvv 100 wwwww") // vmem(Rt32,Mu2,Vv32.w) += Vw32.w +DEF_ENC(V6_vscattermh_add, ICLASS_NCJ" 1 111 001 ttttt PP u vvvvv 101 wwwww") // vmem(Rt32,Mu2,Vv32.h) += Vw32.h +DEF_ENC(V6_vscattermhw_add, ICLASS_NCJ" 1 111 001 ttttt PP u vvvvv 110 wwwww") // vmem(Rt32,Mu2,Vv32.h) += Vw32.h + + +DEF_ENC(V6_vscattermwq, ICLASS_NCJ" 1 111 100 ttttt PP u vvvvv 0ss wwwww") // if (Qs4) vmem(Rt32,Mu2,Vv32.w)=Vw32.w +DEF_ENC(V6_vscattermhq, ICLASS_NCJ" 1 111 100 ttttt PP u vvvvv 1ss wwwww") // if (Qs4) vmem(Rt32,Mu2,Vv32.h)=Vw32.h +DEF_ENC(V6_vscattermhwq, ICLASS_NCJ" 1 111 101 ttttt PP u vvvvv 0ss wwwww") // if (Qs4) vmem(Rt32,Mu2,Vv32.h)=Vw32.h + + + + + +DEF_CLASS32(ICLASS_CJ" 1--- -------- PP------ --------",COPROC_VX) + + + +/*************************************************************** +* +* Group #0, Uses Q6 Rt8: new in v61 +* +****************************************************************/ + +DEF_FIELDROW_DESC32( ICLASS_CJ" 1 000 --- ----- PP - ----- ----- ---","[#1] Vd32=(Vu32, Vv32, Rt8)") +DEF_ENC(V6_vasrhbsat, ICLASS_CJ" 1 000 vvv vvttt PP 0 uuuuu 000 ddddd") // +DEF_ENC(V6_vasruwuhrndsat, ICLASS_CJ" 1 000 vvv vvttt PP 0 uuuuu 001 ddddd") // +DEF_ENC(V6_vasrwuhrndsat, ICLASS_CJ" 1 000 vvv vvttt PP 0 uuuuu 010 ddddd") // +DEF_ENC(V6_vlutvvb_nm, ICLASS_CJ" 1 000 vvv vvttt PP 0 uuuuu 011 ddddd") // +DEF_ENC(V6_vlutvwh_nm, ICLASS_CJ" 1 000 vvv vvttt PP 0 uuuuu 100 ddddd") // +DEF_ENC(V6_vasruhubrndsat, ICLASS_CJ" 1 000 vvv vvttt PP 0 uuuuu 111 ddddd") // + +DEF_ENC(V6_vasruwuhsat, ICLASS_CJ" 1 000 vvv vvttt PP 1 uuuuu 100 ddddd") // +DEF_ENC(V6_vasruhubsat, ICLASS_CJ" 1 000 vvv vvttt PP 1 uuuuu 101 ddddd") // + +/*************************************************************** +* +* Group #1, Uses Q6 Rt32 +* +****************************************************************/ + +DEF_FIELDROW_DESC32( ICLASS_CJ" 1 001 --- ----- PP - ----- ----- ---","[#1] Vd32=(Vu32, Rt32)") +DEF_ENC(V6_vtmpyb, ICLASS_CJ" 1 001 000 ttttt PP 0 uuuuu 000 ddddd") // +DEF_ENC(V6_vtmpybus, ICLASS_CJ" 1 001 000 ttttt PP 0 uuuuu 001 ddddd") // +DEF_ENC(V6_vdmpyhb, ICLASS_CJ" 1 001 000 ttttt PP 0 uuuuu 010 ddddd") // +DEF_ENC(V6_vrmpyub, ICLASS_CJ" 1 001 000 ttttt PP 0 uuuuu 011 ddddd") // +DEF_ENC(V6_vrmpybus, ICLASS_CJ" 1 001 000 ttttt PP 0 uuuuu 100 ddddd") // +DEF_ENC(V6_vdsaduh, ICLASS_CJ" 1 001 000 ttttt PP 0 uuuuu 101 ddddd") // +DEF_ENC(V6_vdmpybus, ICLASS_CJ" 1 001 000 ttttt PP 0 uuuuu 110 ddddd") // +DEF_ENC(V6_vdmpybus_dv, ICLASS_CJ" 1 001 000 ttttt PP 0 uuuuu 111 ddddd") // + +DEF_ENC(V6_vdmpyhsusat, ICLASS_CJ" 1 001 001 ttttt PP 0 uuuuu 000 ddddd") // +DEF_ENC(V6_vdmpyhsuisat, ICLASS_CJ" 1 001 001 ttttt PP 0 uuuuu 001 ddddd") // +DEF_ENC(V6_vdmpyhsat, ICLASS_CJ" 1 001 001 ttttt PP 0 uuuuu 010 ddddd") // +DEF_ENC(V6_vdmpyhisat, ICLASS_CJ" 1 001 001 ttttt PP 0 uuuuu 011 ddddd") // +DEF_ENC(V6_vdmpyhb_dv, ICLASS_CJ" 1 001 001 ttttt PP 0 uuuuu 100 ddddd") // +DEF_ENC(V6_vmpybus, ICLASS_CJ" 1 001 001 ttttt PP 0 uuuuu 101 ddddd") // +DEF_ENC(V6_vmpabus, ICLASS_CJ" 1 001 001 ttttt PP 0 uuuuu 110 ddddd") // +DEF_ENC(V6_vmpahb, ICLASS_CJ" 1 001 001 ttttt PP 0 uuuuu 111 ddddd") // + +DEF_ENC(V6_vmpyh, ICLASS_CJ" 1 001 010 ttttt PP 0 uuuuu 000 ddddd") // +DEF_ENC(V6_vmpyhss, ICLASS_CJ" 1 001 010 ttttt PP 0 uuuuu 001 ddddd") // +DEF_ENC(V6_vmpyhsrs, ICLASS_CJ" 1 001 010 ttttt PP 0 uuuuu 010 ddddd") // 
+DEF_ENC(V6_vmpyuh, ICLASS_CJ" 1 001 010 ttttt PP 0 uuuuu 011 ddddd") // +DEF_ENC(V6_vrmpybusi, ICLASS_CJ" 1 001 010 ttttt PP 0 uuuuu 10i ddddd") // +DEF_ENC(V6_vrsadubi, ICLASS_CJ" 1 001 010 ttttt PP 0 uuuuu 11i ddddd") // + +DEF_ENC(V6_vmpyihb, ICLASS_CJ" 1 001 011 ttttt PP 0 uuuuu 000 ddddd") // +DEF_ENC(V6_vror, ICLASS_CJ" 1 001 011 ttttt PP 0 uuuuu 001 ddddd") // +DEF_ENC(V6_vmpyuhe, ICLASS_CJ" 1 001 011 ttttt PP 0 uuuuu 010 ddddd") // +DEF_ENC(V6_vmpabuu, ICLASS_CJ" 1 001 011 ttttt PP 0 uuuuu 011 ddddd") // +DEF_ENC(V6_vlut4, ICLASS_CJ" 1 001 011 ttttt PP 0 uuuuu 100 ddddd") // + + +DEF_ENC(V6_vasrw, ICLASS_CJ" 1 001 011 ttttt PP 0 uuuuu 101 ddddd") // +DEF_ENC(V6_vasrh, ICLASS_CJ" 1 001 011 ttttt PP 0 uuuuu 110 ddddd") // +DEF_ENC(V6_vaslw, ICLASS_CJ" 1 001 011 ttttt PP 0 uuuuu 111 ddddd") // + +DEF_ENC(V6_vaslh, ICLASS_CJ" 1 001 100 ttttt PP 0 uuuuu 000 ddddd") // +DEF_ENC(V6_vlsrw, ICLASS_CJ" 1 001 100 ttttt PP 0 uuuuu 001 ddddd") // +DEF_ENC(V6_vlsrh, ICLASS_CJ" 1 001 100 ttttt PP 0 uuuuu 010 ddddd") // +DEF_ENC(V6_vlsrb, ICLASS_CJ" 1 001 100 ttttt PP 0 uuuuu 011 ddddd") // + +DEF_ENC(V6_vmpauhb, ICLASS_CJ" 1 001 100 ttttt PP 0 uuuuu 101 ddddd") // +DEF_ENC(V6_vmpyiwub, ICLASS_CJ" 1 001 100 ttttt PP 0 uuuuu 110 ddddd") // +DEF_ENC(V6_vmpyiwh, ICLASS_CJ" 1 001 100 ttttt PP 0 uuuuu 111 ddddd") // + +DEF_ENC(V6_vmpyiwb, ICLASS_CJ" 1 001 101 ttttt PP 0 uuuuu 000 ddddd") // +DEF_ENC(V6_lvsplatw, ICLASS_CJ" 1 001 101 ttttt PP 0 ----0 001 ddddd") // + + + +DEF_ENC(V6_pred_scalar2, ICLASS_CJ" 1 001 101 ttttt PP 0 ----- 010 -01dd") // +DEF_ENC(V6_vandvrt, ICLASS_CJ" 1 001 101 ttttt PP 0 uuuuu 010 -10dd") // +DEF_ENC(V6_pred_scalar2v2, ICLASS_CJ" 1 001 101 ttttt PP 0 ----- 010 -11dd") // + +DEF_ENC(V6_vtmpyhb, ICLASS_CJ" 1 001 101 ttttt PP 0 uuuuu 100 ddddd") // +DEF_ENC(V6_vandqrt, ICLASS_CJ" 1 001 101 ttttt PP 0 --0uu 101 ddddd") // +DEF_ENC(V6_vandnqrt, ICLASS_CJ" 1 001 101 ttttt PP 0 --1uu 101 ddddd") // + +DEF_ENC(V6_vrmpyubi, ICLASS_CJ" 1 001 101 ttttt PP 0 uuuuu 11i ddddd") // + +DEF_ENC(V6_vmpyub, ICLASS_CJ" 1 001 110 ttttt PP 0 uuuuu 000 ddddd") // +DEF_ENC(V6_lvsplath, ICLASS_CJ" 1 001 110 ttttt PP 0 ----- 001 ddddd") // +DEF_ENC(V6_lvsplatb, ICLASS_CJ" 1 001 110 ttttt PP 0 ----- 010 ddddd") // + + +DEF_FIELDROW_DESC32( ICLASS_CJ" 1 001 --- ----- PP - ----- ----- ---","[#1] Vx32=(Vu32, Rt32)") +DEF_ENC(V6_vtmpyb_acc, ICLASS_CJ" 1 001 000 ttttt PP 1 uuuuu 000 xxxxx") // +DEF_ENC(V6_vtmpybus_acc, ICLASS_CJ" 1 001 000 ttttt PP 1 uuuuu 001 xxxxx") // +DEF_ENC(V6_vtmpyhb_acc, ICLASS_CJ" 1 001 000 ttttt PP 1 uuuuu 010 xxxxx") // +DEF_ENC(V6_vdmpyhb_acc, ICLASS_CJ" 1 001 000 ttttt PP 1 uuuuu 011 xxxxx") // +DEF_ENC(V6_vrmpyub_acc, ICLASS_CJ" 1 001 000 ttttt PP 1 uuuuu 100 xxxxx") // +DEF_ENC(V6_vrmpybus_acc, ICLASS_CJ" 1 001 000 ttttt PP 1 uuuuu 101 xxxxx") // +DEF_ENC(V6_vdmpybus_acc, ICLASS_CJ" 1 001 000 ttttt PP 1 uuuuu 110 xxxxx") // +DEF_ENC(V6_vdmpybus_dv_acc, ICLASS_CJ" 1 001 000 ttttt PP 1 uuuuu 111 xxxxx") // + +DEF_ENC(V6_vdmpyhsusat_acc, ICLASS_CJ" 1 001 001 ttttt PP 1 uuuuu 000 xxxxx") // +DEF_ENC(V6_vdmpyhsuisat_acc,ICLASS_CJ" 1 001 001 ttttt PP 1 uuuuu 001 xxxxx") // +DEF_ENC(V6_vdmpyhisat_acc, ICLASS_CJ" 1 001 001 ttttt PP 1 uuuuu 010 xxxxx") // +DEF_ENC(V6_vdmpyhsat_acc, ICLASS_CJ" 1 001 001 ttttt PP 1 uuuuu 011 xxxxx") // +DEF_ENC(V6_vdmpyhb_dv_acc, ICLASS_CJ" 1 001 001 ttttt PP 1 uuuuu 100 xxxxx") // +DEF_ENC(V6_vmpybus_acc, ICLASS_CJ" 1 001 001 ttttt PP 1 uuuuu 101 xxxxx") // +DEF_ENC(V6_vmpabus_acc, ICLASS_CJ" 1 001 001 ttttt PP 1 uuuuu 110 xxxxx") // 
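Each DEF_ENC string here specifies, after the ICLASS prefix is expanded, a 32-bit pattern read left to right from bit 31 down to bit 0: '0'/'1' are fixed bits, '-' are don't-cares, "PP" marks the packet parse bits, and a run of one letter (ttttt, uuuuu, ddddd, iii, ...) is an operand field. Below is a small self-contained sketch of reading such a field back out of an instruction word; the helper and the sample pattern are illustrative only and are not how the QEMU generator scripts actually parse these files.

#include <assert.h>
#include <stdint.h>

/*
 * Extract the operand field tagged with 'letter' from a 32-bit instruction
 * word, given a DEF_ENC-style pattern (spaces ignored, leftmost character
 * corresponds to bit 31).  Illustrative helper only.
 */
uint32_t extract_field(const char *pattern, uint32_t insn, char letter)
{
    uint32_t val = 0;
    int bit = 31;

    for (const char *p = pattern; *p != '\0' && bit >= 0; p++) {
        if (*p == ' ') {
            continue;                      /* spaces are only for readability */
        }
        if (*p == letter) {
            val = (val << 1) | ((insn >> bit) & 1);
        }
        bit--;
    }
    return val;
}

int main(void)
{
    /* "----" stands in for the expanded ICLASS prefix; made-up word below */
    const char *pat = "---- 1001 010 ttttt PP 0 uuuuu 000 ddddd";
    uint32_t insn = (3u << 16) | (7u << 8) | 5u;

    assert(extract_field(pat, insn, 't') == 3);
    assert(extract_field(pat, insn, 'u') == 7);
    assert(extract_field(pat, insn, 'd') == 5);
    return 0;
}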
+DEF_ENC(V6_vmpahb_acc, ICLASS_CJ" 1 001 001 ttttt PP 1 uuuuu 111 xxxxx") // + +DEF_ENC(V6_vmpyhsat_acc, ICLASS_CJ" 1 001 010 ttttt PP 1 uuuuu 000 xxxxx") // +DEF_ENC(V6_vmpyuh_acc, ICLASS_CJ" 1 001 010 ttttt PP 1 uuuuu 001 xxxxx") // +DEF_ENC(V6_vmpyiwb_acc, ICLASS_CJ" 1 001 010 ttttt PP 1 uuuuu 010 xxxxx") // +DEF_ENC(V6_vmpyiwh_acc, ICLASS_CJ" 1 001 010 ttttt PP 1 uuuuu 011 xxxxx") // +DEF_ENC(V6_vrmpybusi_acc, ICLASS_CJ" 1 001 010 ttttt PP 1 uuuuu 10i xxxxx") // +DEF_ENC(V6_vrsadubi_acc, ICLASS_CJ" 1 001 010 ttttt PP 1 uuuuu 11i xxxxx") // + +DEF_ENC(V6_vdsaduh_acc, ICLASS_CJ" 1 001 011 ttttt PP 1 uuuuu 000 xxxxx") // +DEF_ENC(V6_vmpyihb_acc, ICLASS_CJ" 1 001 011 ttttt PP 1 uuuuu 001 xxxxx") // +DEF_ENC(V6_vaslw_acc, ICLASS_CJ" 1 001 011 ttttt PP 1 uuuuu 010 xxxxx") // +DEF_ENC(V6_vandqrt_acc, ICLASS_CJ" 1 001 011 ttttt PP 1 --0uu 011 xxxxx") // +DEF_ENC(V6_vandnqrt_acc, ICLASS_CJ" 1 001 011 ttttt PP 1 --1uu 011 xxxxx") // +DEF_ENC(V6_vandvrt_acc, ICLASS_CJ" 1 001 011 ttttt PP 1 uuuuu 100 ---xx") // +DEF_ENC(V6_vasrw_acc, ICLASS_CJ" 1 001 011 ttttt PP 1 uuuuu 101 xxxxx") // +DEF_ENC(V6_vrmpyubi_acc, ICLASS_CJ" 1 001 011 ttttt PP 1 uuuuu 11i xxxxx") // + +DEF_ENC(V6_vmpyub_acc, ICLASS_CJ" 1 001 100 ttttt PP 1 uuuuu 000 xxxxx") // +DEF_ENC(V6_vmpyiwub_acc, ICLASS_CJ" 1 001 100 ttttt PP 1 uuuuu 001 xxxxx") // +DEF_ENC(V6_vmpauhb_acc, ICLASS_CJ" 1 001 100 ttttt PP 1 uuuuu 010 xxxxx") // +DEF_ENC(V6_vmpyuhe_acc, ICLASS_CJ" 1 001 100 ttttt PP 1 uuuuu 011 xxxxx") +DEF_ENC(V6_vmpahhsat, ICLASS_CJ" 1 001 100 ttttt PP 1 uuuuu 100 xxxxx") // +DEF_ENC(V6_vmpauhuhsat, ICLASS_CJ" 1 001 100 ttttt PP 1 uuuuu 101 xxxxx") // +DEF_ENC(V6_vmpsuhuhsat, ICLASS_CJ" 1 001 100 ttttt PP 1 uuuuu 110 xxxxx") // +DEF_ENC(V6_vasrh_acc, ICLASS_CJ" 1 001 100 ttttt PP 1 uuuuu 111 xxxxx") // + + + + +DEF_ENC(V6_vinsertwr, ICLASS_CJ" 1 001 101 ttttt PP 1 ----- 001 xxxxx") + +DEF_ENC(V6_vmpabuu_acc, ICLASS_CJ" 1 001 101 ttttt PP 1 uuuuu 100 xxxxx") // +DEF_ENC(V6_vaslh_acc, ICLASS_CJ" 1 001 101 ttttt PP 1 uuuuu 101 xxxxx") // +DEF_ENC(V6_vmpyh_acc, ICLASS_CJ" 1 001 101 ttttt PP 1 uuuuu 110 xxxxx") // + + + +DEF_FIELDROW_DESC32( ICLASS_CJ" 1 001 --- ----- PP - ----- ----- ---","[#1] (Vx32, Vy32, Rt32)") +DEF_ENC(V6_vshuff, ICLASS_CJ" 1 001 111 ttttt PP 1 yyyyy 001 xxxxx") // +DEF_ENC(V6_vdeal, ICLASS_CJ" 1 001 111 ttttt PP 1 yyyyy 010 xxxxx") // + +DEF_FIELDROW_DESC32( ICLASS_CJ" 1 010 --- ----- PP - ----- ----- ---","[#2] if (Ps) Vd=Vu") +DEF_ENC(V6_vcmov, ICLASS_CJ" 1 010 000 ----- PP - uuuuu -ss ddddd") +DEF_ENC(V6_vncmov, ICLASS_CJ" 1 010 001 ----- PP - uuuuu -ss ddddd") +DEF_ENC(V6_vnccombine, ICLASS_CJ" 1 010 010 vvvvv PP - uuuuu -ss ddddd") +DEF_ENC(V6_vccombine, ICLASS_CJ" 1 010 011 vvvvv PP - uuuuu -ss ddddd") + +DEF_ENC(V6_vrotr, ICLASS_CJ" 1 010 100 vvvvv PP 1 uuuuu 111 ddddd") +DEF_ENC(V6_vasr_into, ICLASS_CJ" 1 010 101 vvvvv PP 1 uuuuu 111 xxxxx") + +/*************************************************************** +* +* Group #3, Uses Q6 Rt8 +* +****************************************************************/ + +DEF_FIELDROW_DESC32( ICLASS_CJ" 1 011 --- ----- PP - ----- ----- ---","[#3] Vd32=(Vu32, Vv32, Rt8)") +DEF_ENC(V6_valignb, ICLASS_CJ" 1 011 vvv vvttt PP 0 uuuuu 000 ddddd") // +DEF_ENC(V6_vlalignb, ICLASS_CJ" 1 011 vvv vvttt PP 0 uuuuu 001 ddddd") // +DEF_ENC(V6_vasrwh, ICLASS_CJ" 1 011 vvv vvttt PP 0 uuuuu 010 ddddd") // +DEF_ENC(V6_vasrwhsat, ICLASS_CJ" 1 011 vvv vvttt PP 0 uuuuu 011 ddddd") // +DEF_ENC(V6_vasrwhrndsat, ICLASS_CJ" 1 011 vvv vvttt PP 0 uuuuu 100 ddddd") // +DEF_ENC(V6_vasrwuhsat, 
ICLASS_CJ" 1 011 vvv vvttt PP 0 uuuuu 101 ddddd") // +DEF_ENC(V6_vasrhubsat, ICLASS_CJ" 1 011 vvv vvttt PP 0 uuuuu 110 ddddd") // +DEF_ENC(V6_vasrhubrndsat, ICLASS_CJ" 1 011 vvv vvttt PP 0 uuuuu 111 ddddd") // + +DEF_ENC(V6_vasrhbrndsat, ICLASS_CJ" 1 011 vvv vvttt PP 1 uuuuu 000 ddddd") // +DEF_ENC(V6_vlutvvb, ICLASS_CJ" 1 011 vvv vvttt PP 1 uuuuu 001 ddddd") +DEF_ENC(V6_vshuffvdd, ICLASS_CJ" 1 011 vvv vvttt PP 1 uuuuu 011 ddddd") // +DEF_ENC(V6_vdealvdd, ICLASS_CJ" 1 011 vvv vvttt PP 1 uuuuu 100 ddddd") // +DEF_ENC(V6_vlutvvb_oracc, ICLASS_CJ" 1 011 vvv vvttt PP 1 uuuuu 101 xxxxx") +DEF_ENC(V6_vlutvwh, ICLASS_CJ" 1 011 vvv vvttt PP 1 uuuuu 110 ddddd") +DEF_ENC(V6_vlutvwh_oracc, ICLASS_CJ" 1 011 vvv vvttt PP 1 uuuuu 111 xxxxx") + + + +/*************************************************************** +* +* Group #4, No Q6 regs +* +****************************************************************/ + +DEF_FIELDROW_DESC32( ICLASS_CJ" 1 100 --- ----- PP 0 ----- ----- ---","[#4] Vd32=(Vu32, Vv32)") +DEF_ENC(V6_vrmpyubv, ICLASS_CJ" 1 100 000 vvvvv PP 0 uuuuu 000 ddddd") // +DEF_ENC(V6_vrmpybv, ICLASS_CJ" 1 100 000 vvvvv PP 0 uuuuu 001 ddddd") // +DEF_ENC(V6_vrmpybusv, ICLASS_CJ" 1 100 000 vvvvv PP 0 uuuuu 010 ddddd") // +DEF_ENC(V6_vdmpyhvsat, ICLASS_CJ" 1 100 000 vvvvv PP 0 uuuuu 011 ddddd") // +DEF_ENC(V6_vmpybv, ICLASS_CJ" 1 100 000 vvvvv PP 0 uuuuu 100 ddddd") // +DEF_ENC(V6_vmpyubv, ICLASS_CJ" 1 100 000 vvvvv PP 0 uuuuu 101 ddddd") // +DEF_ENC(V6_vmpybusv, ICLASS_CJ" 1 100 000 vvvvv PP 0 uuuuu 110 ddddd") // +DEF_ENC(V6_vmpyhv, ICLASS_CJ" 1 100 000 vvvvv PP 0 uuuuu 111 ddddd") // + +DEF_ENC(V6_vmpyuhv, ICLASS_CJ" 1 100 001 vvvvv PP 0 uuuuu 000 ddddd") // +DEF_ENC(V6_vmpyhvsrs, ICLASS_CJ" 1 100 001 vvvvv PP 0 uuuuu 001 ddddd") // +DEF_ENC(V6_vmpyhus, ICLASS_CJ" 1 100 001 vvvvv PP 0 uuuuu 010 ddddd") // +DEF_ENC(V6_vmpabusv, ICLASS_CJ" 1 100 001 vvvvv PP 0 uuuuu 011 ddddd") // +DEF_ENC(V6_vmpyih, ICLASS_CJ" 1 100 001 vvvvv PP 0 uuuuu 100 ddddd") // +DEF_ENC(V6_vand, ICLASS_CJ" 1 100 001 vvvvv PP 0 uuuuu 101 ddddd") // +DEF_ENC(V6_vor, ICLASS_CJ" 1 100 001 vvvvv PP 0 uuuuu 110 ddddd") // +DEF_ENC(V6_vxor, ICLASS_CJ" 1 100 001 vvvvv PP 0 uuuuu 111 ddddd") // + +DEF_ENC(V6_vaddw, ICLASS_CJ" 1 100 010 vvvvv PP 0 uuuuu 000 ddddd") // +DEF_ENC(V6_vaddubsat, ICLASS_CJ" 1 100 010 vvvvv PP 0 uuuuu 001 ddddd") // +DEF_ENC(V6_vadduhsat, ICLASS_CJ" 1 100 010 vvvvv PP 0 uuuuu 010 ddddd") // +DEF_ENC(V6_vaddhsat, ICLASS_CJ" 1 100 010 vvvvv PP 0 uuuuu 011 ddddd") // +DEF_ENC(V6_vaddwsat, ICLASS_CJ" 1 100 010 vvvvv PP 0 uuuuu 100 ddddd") // +DEF_ENC(V6_vsubb, ICLASS_CJ" 1 100 010 vvvvv PP 0 uuuuu 101 ddddd") // +DEF_ENC(V6_vsubh, ICLASS_CJ" 1 100 010 vvvvv PP 0 uuuuu 110 ddddd") // +DEF_ENC(V6_vsubw, ICLASS_CJ" 1 100 010 vvvvv PP 0 uuuuu 111 ddddd") // + +DEF_ENC(V6_vsububsat, ICLASS_CJ" 1 100 011 vvvvv PP 0 uuuuu 000 ddddd") // +DEF_ENC(V6_vsubuhsat, ICLASS_CJ" 1 100 011 vvvvv PP 0 uuuuu 001 ddddd") // +DEF_ENC(V6_vsubhsat, ICLASS_CJ" 1 100 011 vvvvv PP 0 uuuuu 010 ddddd") // +DEF_ENC(V6_vsubwsat, ICLASS_CJ" 1 100 011 vvvvv PP 0 uuuuu 011 ddddd") // +DEF_ENC(V6_vaddb_dv, ICLASS_CJ" 1 100 011 vvvvv PP 0 uuuuu 100 ddddd") // +DEF_ENC(V6_vaddh_dv, ICLASS_CJ" 1 100 011 vvvvv PP 0 uuuuu 101 ddddd") // +DEF_ENC(V6_vaddw_dv, ICLASS_CJ" 1 100 011 vvvvv PP 0 uuuuu 110 ddddd") // +DEF_ENC(V6_vaddubsat_dv,ICLASS_CJ" 1 100 011 vvvvv PP 0 uuuuu 111 ddddd") // + +DEF_ENC(V6_vadduhsat_dv,ICLASS_CJ" 1 100 100 vvvvv PP 0 uuuuu 000 ddddd") // +DEF_ENC(V6_vaddhsat_dv, ICLASS_CJ" 1 100 100 vvvvv PP 0 uuuuu 001 ddddd") // 
+DEF_ENC(V6_vaddwsat_dv, ICLASS_CJ" 1 100 100 vvvvv PP 0 uuuuu 010 ddddd") // +DEF_ENC(V6_vsubb_dv, ICLASS_CJ" 1 100 100 vvvvv PP 0 uuuuu 011 ddddd") // +DEF_ENC(V6_vsubh_dv, ICLASS_CJ" 1 100 100 vvvvv PP 0 uuuuu 100 ddddd") // +DEF_ENC(V6_vsubw_dv, ICLASS_CJ" 1 100 100 vvvvv PP 0 uuuuu 101 ddddd") // +DEF_ENC(V6_vsububsat_dv,ICLASS_CJ" 1 100 100 vvvvv PP 0 uuuuu 110 ddddd") // +DEF_ENC(V6_vsubuhsat_dv,ICLASS_CJ" 1 100 100 vvvvv PP 0 uuuuu 111 ddddd") // + +DEF_ENC(V6_vsubhsat_dv, ICLASS_CJ" 1 100 101 vvvvv PP 0 uuuuu 000 ddddd") // +DEF_ENC(V6_vsubwsat_dv, ICLASS_CJ" 1 100 101 vvvvv PP 0 uuuuu 001 ddddd") // +DEF_ENC(V6_vaddubh, ICLASS_CJ" 1 100 101 vvvvv PP 0 uuuuu 010 ddddd") // +DEF_ENC(V6_vadduhw, ICLASS_CJ" 1 100 101 vvvvv PP 0 uuuuu 011 ddddd") // +DEF_ENC(V6_vaddhw, ICLASS_CJ" 1 100 101 vvvvv PP 0 uuuuu 100 ddddd") // +DEF_ENC(V6_vsububh, ICLASS_CJ" 1 100 101 vvvvv PP 0 uuuuu 101 ddddd") // +DEF_ENC(V6_vsubuhw, ICLASS_CJ" 1 100 101 vvvvv PP 0 uuuuu 110 ddddd") // +DEF_ENC(V6_vsubhw, ICLASS_CJ" 1 100 101 vvvvv PP 0 uuuuu 111 ddddd") // + +DEF_ENC(V6_vabsdiffub, ICLASS_CJ" 1 100 110 vvvvv PP 0 uuuuu 000 ddddd") // +DEF_ENC(V6_vabsdiffh, ICLASS_CJ" 1 100 110 vvvvv PP 0 uuuuu 001 ddddd") // +DEF_ENC(V6_vabsdiffuh, ICLASS_CJ" 1 100 110 vvvvv PP 0 uuuuu 010 ddddd") // +DEF_ENC(V6_vabsdiffw, ICLASS_CJ" 1 100 110 vvvvv PP 0 uuuuu 011 ddddd") // +DEF_ENC(V6_vavgub, ICLASS_CJ" 1 100 110 vvvvv PP 0 uuuuu 100 ddddd") // +DEF_ENC(V6_vavguh, ICLASS_CJ" 1 100 110 vvvvv PP 0 uuuuu 101 ddddd") // +DEF_ENC(V6_vavgh, ICLASS_CJ" 1 100 110 vvvvv PP 0 uuuuu 110 ddddd") // +DEF_ENC(V6_vavgw, ICLASS_CJ" 1 100 110 vvvvv PP 0 uuuuu 111 ddddd") // + +DEF_ENC(V6_vnavgub, ICLASS_CJ" 1 100 111 vvvvv PP 0 uuuuu 000 ddddd") // +DEF_ENC(V6_vnavgh, ICLASS_CJ" 1 100 111 vvvvv PP 0 uuuuu 001 ddddd") // +DEF_ENC(V6_vnavgw, ICLASS_CJ" 1 100 111 vvvvv PP 0 uuuuu 010 ddddd") // +DEF_ENC(V6_vavgubrnd, ICLASS_CJ" 1 100 111 vvvvv PP 0 uuuuu 011 ddddd") // +DEF_ENC(V6_vavguhrnd, ICLASS_CJ" 1 100 111 vvvvv PP 0 uuuuu 100 ddddd") // +DEF_ENC(V6_vavghrnd, ICLASS_CJ" 1 100 111 vvvvv PP 0 uuuuu 101 ddddd") // +DEF_ENC(V6_vavgwrnd, ICLASS_CJ" 1 100 111 vvvvv PP 0 uuuuu 110 ddddd") // +DEF_ENC(V6_vmpabuuv, ICLASS_CJ" 1 100 111 vvvvv PP 0 uuuuu 111 ddddd") // + +DEF_FIELDROW_DESC32( ICLASS_CJ" 1 100 --- ----- PP 1 ----- ----- ---","[#4] Vx32=(Vu32, Vv32)") +DEF_ENC(V6_vrmpyubv_acc, ICLASS_CJ" 1 100 000 vvvvv PP 1 uuuuu 000 xxxxx") // +DEF_ENC(V6_vrmpybv_acc, ICLASS_CJ" 1 100 000 vvvvv PP 1 uuuuu 001 xxxxx") // +DEF_ENC(V6_vrmpybusv_acc, ICLASS_CJ" 1 100 000 vvvvv PP 1 uuuuu 010 xxxxx") // +DEF_ENC(V6_vdmpyhvsat_acc, ICLASS_CJ" 1 100 000 vvvvv PP 1 uuuuu 011 xxxxx") // +DEF_ENC(V6_vmpybv_acc, ICLASS_CJ" 1 100 000 vvvvv PP 1 uuuuu 100 xxxxx") // +DEF_ENC(V6_vmpyubv_acc, ICLASS_CJ" 1 100 000 vvvvv PP 1 uuuuu 101 xxxxx") // +DEF_ENC(V6_vmpybusv_acc, ICLASS_CJ" 1 100 000 vvvvv PP 1 uuuuu 110 xxxxx") // +DEF_ENC(V6_vmpyhv_acc, ICLASS_CJ" 1 100 000 vvvvv PP 1 uuuuu 111 xxxxx") // + +DEF_ENC(V6_vmpyuhv_acc, ICLASS_CJ" 1 100 001 vvvvv PP 1 uuuuu 000 xxxxx") // +DEF_ENC(V6_vmpyhus_acc, ICLASS_CJ" 1 100 001 vvvvv PP 1 uuuuu 001 xxxxx") // +DEF_ENC(V6_vaddhw_acc, ICLASS_CJ" 1 100 001 vvvvv PP 1 uuuuu 010 xxxxx") // +DEF_ENC(V6_vmpyowh_64_acc, ICLASS_CJ" 1 100 001 vvvvv PP 1 uuuuu 011 xxxxx") +DEF_ENC(V6_vmpyih_acc, ICLASS_CJ" 1 100 001 vvvvv PP 1 uuuuu 100 xxxxx") // +DEF_ENC(V6_vmpyiewuh_acc, ICLASS_CJ" 1 100 001 vvvvv PP 1 uuuuu 101 xxxxx") // +DEF_ENC(V6_vmpyowh_sacc, ICLASS_CJ" 1 100 001 vvvvv PP 1 uuuuu 110 xxxxx") // 
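The veq*/vgt* encodings in the group just below write HVX predicate (Q) registers, which hold one bit per vector byte. The vec_to_qvec helper added to genptr.c above shows how a vector of lane-wise truth values is folded into that layout: each element of 'size' bytes sets 'size' consecutive bits to the value of (element != 0). Below is a standalone sketch of the same conversion using plain arrays instead of TCG ops; the 128-byte vector size and little-endian lane assembly are assumptions of this sketch.

#include <stdint.h>
#include <stddef.h>
#include <string.h>

#define VEC_BYTES 128                      /* assumed HVX vector size */

/*
 * For every element of 'size' bytes, write its truth value (element != 0)
 * into 'size' consecutive predicate bits, i.e. one Q-register bit per
 * vector byte.  Mirrors the effect of vec_to_qvec, not its TCG form.
 */
void vec_to_qvec_sketch(size_t size, uint8_t qreg[VEC_BYTES / 8],
                        const uint8_t vec[VEC_BYTES])
{
    memset(qreg, 0, VEC_BYTES / 8);
    for (size_t i = 0; i < VEC_BYTES; i += size) {
        uint64_t elem = 0;
        for (size_t b = 0; b < size; b++) {
            elem |= (uint64_t)vec[i + b] << (8 * b);   /* little-endian lane */
        }
        if (elem != 0) {
            for (size_t b = 0; b < size; b++) {
                qreg[(i + b) / 8] |= (uint8_t)(1u << ((i + b) % 8));
            }
        }
    }
}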
+DEF_ENC(V6_vmpyowh_rnd_sacc,ICLASS_CJ" 1 100 001 vvvvv PP 1 uuuuu 111 xxxxx") // + +DEF_ENC(V6_vmpyiewh_acc, ICLASS_CJ" 1 100 010 vvvvv PP 1 uuuuu 000 xxxxx") // + +DEF_ENC(V6_vadduhw_acc, ICLASS_CJ" 1 100 010 vvvvv PP 1 uuuuu 100 xxxxx") // +DEF_ENC(V6_vaddubh_acc, ICLASS_CJ" 1 100 010 vvvvv PP 1 uuuuu 101 xxxxx") // + +DEF_FIELDROW_DESC32( ICLASS_CJ" 1 100 100 ----- PP 1 ----- ----- ---","[#4] Qx4=(Vu32, Vv32)") +// Grouped by element size (lsbs), operation (next-lsbs) and operation (next-lsbs) +DEF_ENC(V6_veqb_and, ICLASS_CJ" 1 100 100 vvvvv PP 1 uuuuu 000 000xx") // +DEF_ENC(V6_veqh_and, ICLASS_CJ" 1 100 100 vvvvv PP 1 uuuuu 000 001xx") // +DEF_ENC(V6_veqw_and, ICLASS_CJ" 1 100 100 vvvvv PP 1 uuuuu 000 010xx") // + +DEF_ENC(V6_vgtb_and, ICLASS_CJ" 1 100 100 vvvvv PP 1 uuuuu 000 100xx") // +DEF_ENC(V6_vgth_and, ICLASS_CJ" 1 100 100 vvvvv PP 1 uuuuu 000 101xx") // +DEF_ENC(V6_vgtw_and, ICLASS_CJ" 1 100 100 vvvvv PP 1 uuuuu 000 110xx") // + +DEF_ENC(V6_vgtub_and, ICLASS_CJ" 1 100 100 vvvvv PP 1 uuuuu 001 000xx") // +DEF_ENC(V6_vgtuh_and, ICLASS_CJ" 1 100 100 vvvvv PP 1 uuuuu 001 001xx") // +DEF_ENC(V6_vgtuw_and, ICLASS_CJ" 1 100 100 vvvvv PP 1 uuuuu 001 010xx") // + +DEF_ENC(V6_veqb_or, ICLASS_CJ" 1 100 100 vvvvv PP 1 uuuuu 010 000xx") // +DEF_ENC(V6_veqh_or, ICLASS_CJ" 1 100 100 vvvvv PP 1 uuuuu 010 001xx") // +DEF_ENC(V6_veqw_or, ICLASS_CJ" 1 100 100 vvvvv PP 1 uuuuu 010 010xx") // + +DEF_ENC(V6_vgtb_or, ICLASS_CJ" 1 100 100 vvvvv PP 1 uuuuu 010 100xx") // +DEF_ENC(V6_vgth_or, ICLASS_CJ" 1 100 100 vvvvv PP 1 uuuuu 010 101xx") // +DEF_ENC(V6_vgtw_or, ICLASS_CJ" 1 100 100 vvvvv PP 1 uuuuu 010 110xx") // + +DEF_ENC(V6_vgtub_or, ICLASS_CJ" 1 100 100 vvvvv PP 1 uuuuu 011 000xx") // +DEF_ENC(V6_vgtuh_or, ICLASS_CJ" 1 100 100 vvvvv PP 1 uuuuu 011 001xx") // +DEF_ENC(V6_vgtuw_or, ICLASS_CJ" 1 100 100 vvvvv PP 1 uuuuu 011 010xx") // + +DEF_ENC(V6_veqb_xor, ICLASS_CJ" 1 100 100 vvvvv PP 1 uuuuu 100 000xx") // +DEF_ENC(V6_veqh_xor, ICLASS_CJ" 1 100 100 vvvvv PP 1 uuuuu 100 001xx") // +DEF_ENC(V6_veqw_xor, ICLASS_CJ" 1 100 100 vvvvv PP 1 uuuuu 100 010xx") // + +DEF_ENC(V6_vgtb_xor, ICLASS_CJ" 1 100 100 vvvvv PP 1 uuuuu 100 100xx") // +DEF_ENC(V6_vgth_xor, ICLASS_CJ" 1 100 100 vvvvv PP 1 uuuuu 100 101xx") // +DEF_ENC(V6_vgtw_xor, ICLASS_CJ" 1 100 100 vvvvv PP 1 uuuuu 100 110xx") // + +DEF_ENC(V6_vgtub_xor, ICLASS_CJ" 1 100 100 vvvvv PP 1 uuuuu 101 000xx") // +DEF_ENC(V6_vgtuh_xor, ICLASS_CJ" 1 100 100 vvvvv PP 1 uuuuu 101 001xx") // +DEF_ENC(V6_vgtuw_xor, ICLASS_CJ" 1 100 100 vvvvv PP 1 uuuuu 101 010xx") // + +DEF_FIELDROW_DESC32( ICLASS_CJ" 1 100 101 ----- PP 1 ----- ----- ---","[#4] Qx4,Vd32=(Vu32, Vv32)") +DEF_ENC(V6_vaddcarry, ICLASS_CJ" 1 100 101 vvvvv PP 1 uuuuu 0xx ddddd") // +DEF_ENC(V6_vsubcarry, ICLASS_CJ" 1 100 101 vvvvv PP 1 uuuuu 1xx ddddd") // + +DEF_FIELDROW_DESC32( ICLASS_CJ" 1 100 11- ----- PP 1 ----- ----- ---","[#4] Vx32|=(Vu32, Vv32,#)") +DEF_ENC(V6_vlutvvb_oracci, ICLASS_CJ" 1 100 110 vvvvv PP 1 uuuuu iii xxxxx") // +DEF_ENC(V6_vlutvwh_oracci, ICLASS_CJ" 1 100 111 vvvvv PP 1 uuuuu iii xxxxx") // + + + +/*************************************************************** +* +* Group #5, Reserved/Deprecated. Uses Q6 Rx. Stupid FFT. 
+* +****************************************************************/ + + + + +/*************************************************************** +* +* Group #6, No Q6 regs +* +****************************************************************/ + +DEF_FIELDROW_DESC32( ICLASS_CJ" 1 110 --0 ----- PP 0 ----- ----- ---","[#6] Vd32=Vu32") +DEF_ENC(V6_vabsh, ICLASS_CJ" 1 110 --0 ---00 PP 0 uuuuu 000 ddddd") // +DEF_ENC(V6_vabsh_sat, ICLASS_CJ" 1 110 --0 ---00 PP 0 uuuuu 001 ddddd") // +DEF_ENC(V6_vabsw, ICLASS_CJ" 1 110 --0 ---00 PP 0 uuuuu 010 ddddd") // +DEF_ENC(V6_vabsw_sat, ICLASS_CJ" 1 110 --0 ---00 PP 0 uuuuu 011 ddddd") // +DEF_ENC(V6_vnot, ICLASS_CJ" 1 110 --0 ---00 PP 0 uuuuu 100 ddddd") // +DEF_ENC(V6_vdealh, ICLASS_CJ" 1 110 --0 ---00 PP 0 uuuuu 110 ddddd") // +DEF_ENC(V6_vdealb, ICLASS_CJ" 1 110 --0 ---00 PP 0 uuuuu 111 ddddd") // + +DEF_ENC(V6_vunpackub, ICLASS_CJ" 1 110 --0 ---01 PP 0 uuuuu 000 ddddd") // +DEF_ENC(V6_vunpackuh, ICLASS_CJ" 1 110 --0 ---01 PP 0 uuuuu 001 ddddd") // +DEF_ENC(V6_vunpackb, ICLASS_CJ" 1 110 --0 ---01 PP 0 uuuuu 010 ddddd") // +DEF_ENC(V6_vunpackh, ICLASS_CJ" 1 110 --0 ---01 PP 0 uuuuu 011 ddddd") // +DEF_ENC(V6_vabsb, ICLASS_CJ" 1 110 --0 ---01 PP 0 uuuuu 100 ddddd") // +DEF_ENC(V6_vabsb_sat, ICLASS_CJ" 1 110 --0 ---01 PP 0 uuuuu 101 ddddd") // +DEF_ENC(V6_vshuffh, ICLASS_CJ" 1 110 --0 ---01 PP 0 uuuuu 111 ddddd") // + +DEF_ENC(V6_vshuffb, ICLASS_CJ" 1 110 --0 ---10 PP 0 uuuuu 000 ddddd") // +DEF_ENC(V6_vzb, ICLASS_CJ" 1 110 --0 ---10 PP 0 uuuuu 001 ddddd") // +DEF_ENC(V6_vzh, ICLASS_CJ" 1 110 --0 ---10 PP 0 uuuuu 010 ddddd") // +DEF_ENC(V6_vsb, ICLASS_CJ" 1 110 --0 ---10 PP 0 uuuuu 011 ddddd") // +DEF_ENC(V6_vsh, ICLASS_CJ" 1 110 --0 ---10 PP 0 uuuuu 100 ddddd") // +DEF_ENC(V6_vcl0w, ICLASS_CJ" 1 110 --0 ---10 PP 0 uuuuu 101 ddddd") // +DEF_ENC(V6_vpopcounth, ICLASS_CJ" 1 110 --0 ---10 PP 0 uuuuu 110 ddddd") // +DEF_ENC(V6_vcl0h, ICLASS_CJ" 1 110 --0 ---10 PP 0 uuuuu 111 ddddd") // + + +DEF_FIELDROW_DESC32( ICLASS_CJ" 1 110 --0 ---11 PP 0 ----- ----- ---","[#6] Qd4=Qt4, Qs4") +DEF_ENC(V6_pred_and, ICLASS_CJ" 1 110 tt0 ---11 PP 0 ---ss 000 000dd") // +DEF_ENC(V6_pred_or, ICLASS_CJ" 1 110 tt0 ---11 PP 0 ---ss 000 001dd") // +DEF_ENC(V6_pred_not, ICLASS_CJ" 1 110 --0 ---11 PP 0 ---ss 000 010dd") // +DEF_ENC(V6_pred_xor, ICLASS_CJ" 1 110 tt0 ---11 PP 0 ---ss 000 011dd") // +DEF_ENC(V6_pred_or_n, ICLASS_CJ" 1 110 tt0 ---11 PP 0 ---ss 000 100dd") // +DEF_ENC(V6_pred_and_n, ICLASS_CJ" 1 110 tt0 ---11 PP 0 ---ss 000 101dd") // +DEF_ENC(V6_shuffeqh, ICLASS_CJ" 1 110 tt0 ---11 PP 0 ---ss 000 110dd") // +DEF_ENC(V6_shuffeqw, ICLASS_CJ" 1 110 tt0 ---11 PP 0 ---ss 000 111dd") // + +DEF_ENC(V6_vnormamtw, ICLASS_CJ" 1 110 --0 ---11 PP 0 uuuuu 100 ddddd") // +DEF_ENC(V6_vnormamth, ICLASS_CJ" 1 110 --0 ---11 PP 0 uuuuu 101 ddddd") // + +DEF_FIELDROW_DESC32( ICLASS_CJ" 1 110 --1 ----- PP 0 ----- ----- ---","[#6] Vd32=Vu32,Vv32") +DEF_ENC(V6_vlutvvbi, ICLASS_CJ" 1 110 001 vvvvv PP 0 uuuuu iii ddddd") +DEF_ENC(V6_vlutvwhi, ICLASS_CJ" 1 110 011 vvvvv PP 0 uuuuu iii ddddd") + +DEF_ENC(V6_vaddbsat_dv, ICLASS_CJ" 1 110 101 vvvvv PP 0 uuuuu 000 ddddd") +DEF_ENC(V6_vsubbsat_dv, ICLASS_CJ" 1 110 101 vvvvv PP 0 uuuuu 001 ddddd") +DEF_ENC(V6_vadduwsat_dv, ICLASS_CJ" 1 110 101 vvvvv PP 0 uuuuu 010 ddddd") +DEF_ENC(V6_vsubuwsat_dv, ICLASS_CJ" 1 110 101 vvvvv PP 0 uuuuu 011 ddddd") +DEF_ENC(V6_vaddububb_sat, ICLASS_CJ" 1 110 101 vvvvv PP 0 uuuuu 100 ddddd") +DEF_ENC(V6_vsubububb_sat, ICLASS_CJ" 1 110 101 vvvvv PP 0 uuuuu 101 ddddd") +DEF_ENC(V6_vmpyewuh_64, ICLASS_CJ" 1 110 101 vvvvv PP 0 
uuuuu 110 ddddd") + +DEF_FIELDROW_DESC32( ICLASS_CJ" 1 110 --0 ----- PP 1 ----- ----- ---","Vx32=Vu32") +DEF_ENC(V6_vunpackob, ICLASS_CJ" 1 110 --0 ---00 PP 1 uuuuu 000 xxxxx") // +DEF_ENC(V6_vunpackoh, ICLASS_CJ" 1 110 --0 ---00 PP 1 uuuuu 001 xxxxx") // +//DEF_ENC(V6_vunpackow, ICLASS_CJ" 1 110 --0 ---00 PP 1 uuuuu 010 xxxxx") // + +DEF_ENC(V6_vhist, ICLASS_CJ" 1 110 --0 ---00 PP 1 -000- 100 -----") +DEF_ENC(V6_vwhist256, ICLASS_CJ" 1 110 --0 ---00 PP 1 -0010 100 -----") +DEF_ENC(V6_vwhist256_sat, ICLASS_CJ" 1 110 --0 ---00 PP 1 -0011 100 -----") +DEF_ENC(V6_vwhist128, ICLASS_CJ" 1 110 --0 ---00 PP 1 -010- 100 -----") +DEF_ENC(V6_vwhist128m, ICLASS_CJ" 1 110 --0 ---00 PP 1 -011i 100 -----") + +DEF_FIELDROW_DESC32( ICLASS_CJ" 1 110 --0 ----- PP 1 ----- ----- ---","if (Qv4) Vx32=Vu32") +DEF_ENC(V6_vaddbq, ICLASS_CJ" 1 110 vv0 ---01 PP 1 uuuuu 000 xxxxx") // +DEF_ENC(V6_vaddhq, ICLASS_CJ" 1 110 vv0 ---01 PP 1 uuuuu 001 xxxxx") // +DEF_ENC(V6_vaddwq, ICLASS_CJ" 1 110 vv0 ---01 PP 1 uuuuu 010 xxxxx") // +DEF_ENC(V6_vaddbnq, ICLASS_CJ" 1 110 vv0 ---01 PP 1 uuuuu 011 xxxxx") // +DEF_ENC(V6_vaddhnq, ICLASS_CJ" 1 110 vv0 ---01 PP 1 uuuuu 100 xxxxx") // +DEF_ENC(V6_vaddwnq, ICLASS_CJ" 1 110 vv0 ---01 PP 1 uuuuu 101 xxxxx") // +DEF_ENC(V6_vsubbq, ICLASS_CJ" 1 110 vv0 ---01 PP 1 uuuuu 110 xxxxx") // +DEF_ENC(V6_vsubhq, ICLASS_CJ" 1 110 vv0 ---01 PP 1 uuuuu 111 xxxxx") // + +DEF_ENC(V6_vsubwq, ICLASS_CJ" 1 110 vv0 ---10 PP 1 uuuuu 000 xxxxx") // +DEF_ENC(V6_vsubbnq, ICLASS_CJ" 1 110 vv0 ---10 PP 1 uuuuu 001 xxxxx") // +DEF_ENC(V6_vsubhnq, ICLASS_CJ" 1 110 vv0 ---10 PP 1 uuuuu 010 xxxxx") // +DEF_ENC(V6_vsubwnq, ICLASS_CJ" 1 110 vv0 ---10 PP 1 uuuuu 011 xxxxx") // + +DEF_ENC(V6_vhistq, ICLASS_CJ" 1 110 vv0 ---10 PP 1 --00- 100 -----") +DEF_ENC(V6_vwhist256q, ICLASS_CJ" 1 110 vv0 ---10 PP 1 --010 100 -----") +DEF_ENC(V6_vwhist256q_sat, ICLASS_CJ" 1 110 vv0 ---10 PP 1 --011 100 -----") +DEF_ENC(V6_vwhist128q, ICLASS_CJ" 1 110 vv0 ---10 PP 1 --10- 100 -----") +DEF_ENC(V6_vwhist128qm, ICLASS_CJ" 1 110 vv0 ---10 PP 1 --11i 100 -----") + + +DEF_ENC(V6_vandvqv, ICLASS_CJ" 1 110 vv0 ---11 PP 1 uuuuu 000 ddddd") +DEF_ENC(V6_vandvnqv, ICLASS_CJ" 1 110 vv0 ---11 PP 1 uuuuu 001 ddddd") + + +DEF_ENC(V6_vprefixqb, ICLASS_CJ" 1 110 vv0 ---11 PP 1 --000 010 ddddd") // +DEF_ENC(V6_vprefixqh, ICLASS_CJ" 1 110 vv0 ---11 PP 1 --001 010 ddddd") // +DEF_ENC(V6_vprefixqw, ICLASS_CJ" 1 110 vv0 ---11 PP 1 --010 010 ddddd") // + + + + +DEF_ENC(V6_vassign, ICLASS_CJ" 1 110 --0 ---11 PP 1 uuuuu 111 ddddd") + +DEF_ENC(V6_valignbi, ICLASS_CJ" 1 110 001 vvvvv PP 1 uuuuu iii ddddd") +DEF_ENC(V6_vlalignbi, ICLASS_CJ" 1 110 011 vvvvv PP 1 uuuuu iii ddddd") +DEF_ENC(V6_vswap, ICLASS_CJ" 1 110 101 vvvvv PP 1 uuuuu -tt ddddd") // +DEF_ENC(V6_vmux, ICLASS_CJ" 1 110 111 vvvvv PP 1 uuuuu -tt ddddd") // + + + +/*************************************************************** +* +* Group #7, No Q6 regs +* +****************************************************************/ + +DEF_FIELDROW_DESC32( ICLASS_CJ" 1 111 --- ----- PP 0 ----- ----- ---","[#7] Vd32=(Vu32, Vv32)") +DEF_ENC(V6_vaddbsat, ICLASS_CJ" 1 111 000 vvvvv PP 0 uuuuu 000 ddddd") // +DEF_ENC(V6_vminub, ICLASS_CJ" 1 111 000 vvvvv PP 0 uuuuu 001 ddddd") // +DEF_ENC(V6_vminuh, ICLASS_CJ" 1 111 000 vvvvv PP 0 uuuuu 010 ddddd") // +DEF_ENC(V6_vminh, ICLASS_CJ" 1 111 000 vvvvv PP 0 uuuuu 011 ddddd") // +DEF_ENC(V6_vminw, ICLASS_CJ" 1 111 000 vvvvv PP 0 uuuuu 100 ddddd") // +DEF_ENC(V6_vmaxub, ICLASS_CJ" 1 111 000 vvvvv PP 0 uuuuu 101 ddddd") // +DEF_ENC(V6_vmaxuh, ICLASS_CJ" 1 111 000 vvvvv 
PP 0 uuuuu 110 ddddd") // +DEF_ENC(V6_vmaxh, ICLASS_CJ" 1 111 000 vvvvv PP 0 uuuuu 111 ddddd") // + + +DEF_ENC(V6_vaddclbh, ICLASS_CJ" 1 111 000 vvvvv PP 1 uuuuu 000 ddddd") // +DEF_ENC(V6_vaddclbw, ICLASS_CJ" 1 111 000 vvvvv PP 1 uuuuu 001 ddddd") // + +DEF_ENC(V6_vavguw, ICLASS_CJ" 1 111 000 vvvvv PP 1 uuuuu 010 ddddd") // +DEF_ENC(V6_vavguwrnd, ICLASS_CJ" 1 111 000 vvvvv PP 1 uuuuu 011 ddddd") // +DEF_ENC(V6_vavgb, ICLASS_CJ" 1 111 000 vvvvv PP 1 uuuuu 100 ddddd") // +DEF_ENC(V6_vavgbrnd, ICLASS_CJ" 1 111 000 vvvvv PP 1 uuuuu 101 ddddd") // +DEF_ENC(V6_vnavgb, ICLASS_CJ" 1 111 000 vvvvv PP 1 uuuuu 110 ddddd") // + + +DEF_ENC(V6_vmaxw, ICLASS_CJ" 1 111 001 vvvvv PP 0 uuuuu 000 ddddd") // +DEF_ENC(V6_vdelta, ICLASS_CJ" 1 111 001 vvvvv PP 0 uuuuu 001 ddddd") // +DEF_ENC(V6_vsubbsat, ICLASS_CJ" 1 111 001 vvvvv PP 0 uuuuu 010 ddddd") // +DEF_ENC(V6_vrdelta, ICLASS_CJ" 1 111 001 vvvvv PP 0 uuuuu 011 ddddd") // +DEF_ENC(V6_vminb, ICLASS_CJ" 1 111 001 vvvvv PP 0 uuuuu 100 ddddd") // +DEF_ENC(V6_vmaxb, ICLASS_CJ" 1 111 001 vvvvv PP 0 uuuuu 101 ddddd") // +DEF_ENC(V6_vsatuwuh, ICLASS_CJ" 1 111 001 vvvvv PP 0 uuuuu 110 ddddd") // +DEF_ENC(V6_vdealb4w, ICLASS_CJ" 1 111 001 vvvvv PP 0 uuuuu 111 ddddd") // + + +DEF_ENC(V6_vmpyowh_rnd, ICLASS_CJ" 1 111 010 vvvvv PP 0 uuuuu 000 ddddd") // +DEF_ENC(V6_vshuffeb, ICLASS_CJ" 1 111 010 vvvvv PP 0 uuuuu 001 ddddd") // +DEF_ENC(V6_vshuffob, ICLASS_CJ" 1 111 010 vvvvv PP 0 uuuuu 010 ddddd") // +DEF_ENC(V6_vshufeh, ICLASS_CJ" 1 111 010 vvvvv PP 0 uuuuu 011 ddddd") // +DEF_ENC(V6_vshufoh, ICLASS_CJ" 1 111 010 vvvvv PP 0 uuuuu 100 ddddd") // +DEF_ENC(V6_vshufoeh, ICLASS_CJ" 1 111 010 vvvvv PP 0 uuuuu 101 ddddd") // +DEF_ENC(V6_vshufoeb, ICLASS_CJ" 1 111 010 vvvvv PP 0 uuuuu 110 ddddd") // +DEF_ENC(V6_vcombine, ICLASS_CJ" 1 111 010 vvvvv PP 0 uuuuu 111 ddddd") // + +DEF_ENC(V6_vmpyieoh, ICLASS_CJ" 1 111 011 vvvvv PP 0 uuuuu 000 ddddd") // +DEF_ENC(V6_vadduwsat, ICLASS_CJ" 1 111 011 vvvvv PP 0 uuuuu 001 ddddd") // +DEF_ENC(V6_vsathub, ICLASS_CJ" 1 111 011 vvvvv PP 0 uuuuu 010 ddddd") // +DEF_ENC(V6_vsatwh, ICLASS_CJ" 1 111 011 vvvvv PP 0 uuuuu 011 ddddd") // +DEF_ENC(V6_vroundwh, ICLASS_CJ" 1 111 011 vvvvv PP 0 uuuuu 100 ddddd") +DEF_ENC(V6_vroundwuh, ICLASS_CJ" 1 111 011 vvvvv PP 0 uuuuu 101 ddddd") +DEF_ENC(V6_vroundhb, ICLASS_CJ" 1 111 011 vvvvv PP 0 uuuuu 110 ddddd") +DEF_ENC(V6_vroundhub, ICLASS_CJ" 1 111 011 vvvvv PP 0 uuuuu 111 ddddd") + +DEF_FIELDROW_DESC32( ICLASS_CJ" 1 111 100 ----- PP - ----- ----- ---","[#7] Qd4=(Vu32, Vv32)") +DEF_ENC(V6_veqb, ICLASS_CJ" 1 111 100 vvvvv PP 0 uuuuu 000 000dd") // +DEF_ENC(V6_veqh, ICLASS_CJ" 1 111 100 vvvvv PP 0 uuuuu 000 001dd") // +DEF_ENC(V6_veqw, ICLASS_CJ" 1 111 100 vvvvv PP 0 uuuuu 000 010dd") // + +DEF_ENC(V6_vgtb, ICLASS_CJ" 1 111 100 vvvvv PP 0 uuuuu 000 100dd") // +DEF_ENC(V6_vgth, ICLASS_CJ" 1 111 100 vvvvv PP 0 uuuuu 000 101dd") // +DEF_ENC(V6_vgtw, ICLASS_CJ" 1 111 100 vvvvv PP 0 uuuuu 000 110dd") // + +DEF_ENC(V6_vgtub, ICLASS_CJ" 1 111 100 vvvvv PP 0 uuuuu 001 000dd") // +DEF_ENC(V6_vgtuh, ICLASS_CJ" 1 111 100 vvvvv PP 0 uuuuu 001 001dd") // +DEF_ENC(V6_vgtuw, ICLASS_CJ" 1 111 100 vvvvv PP 0 uuuuu 001 010dd") // + + +DEF_ENC(V6_vasrwv, ICLASS_CJ" 1 111 101 vvvvv PP 0 uuuuu 000 ddddd") // +DEF_ENC(V6_vlsrwv, ICLASS_CJ" 1 111 101 vvvvv PP 0 uuuuu 001 ddddd") // +DEF_ENC(V6_vlsrhv, ICLASS_CJ" 1 111 101 vvvvv PP 0 uuuuu 010 ddddd") // +DEF_ENC(V6_vasrhv, ICLASS_CJ" 1 111 101 vvvvv PP 0 uuuuu 011 ddddd") // +DEF_ENC(V6_vaslwv, ICLASS_CJ" 1 111 101 vvvvv PP 0 uuuuu 100 ddddd") // +DEF_ENC(V6_vaslhv, ICLASS_CJ" 
1 111 101 vvvvv PP 0 uuuuu 101 ddddd") // +DEF_ENC(V6_vaddb, ICLASS_CJ" 1 111 101 vvvvv PP 0 uuuuu 110 ddddd") // +DEF_ENC(V6_vaddh, ICLASS_CJ" 1 111 101 vvvvv PP 0 uuuuu 111 ddddd") // + + +DEF_ENC(V6_vmpyiewuh, ICLASS_CJ" 1 111 110 vvvvv PP 0 uuuuu 000 ddddd") +DEF_ENC(V6_vmpyiowh, ICLASS_CJ" 1 111 110 vvvvv PP 0 uuuuu 001 ddddd") +DEF_ENC(V6_vpackeb, ICLASS_CJ" 1 111 110 vvvvv PP 0 uuuuu 010 ddddd") // +DEF_ENC(V6_vpackeh, ICLASS_CJ" 1 111 110 vvvvv PP 0 uuuuu 011 ddddd") // +DEF_ENC(V6_vsubuwsat, ICLASS_CJ" 1 111 110 vvvvv PP 0 uuuuu 100 ddddd") // +DEF_ENC(V6_vpackhub_sat,ICLASS_CJ" 1 111 110 vvvvv PP 0 uuuuu 101 ddddd") // +DEF_ENC(V6_vpackhb_sat, ICLASS_CJ" 1 111 110 vvvvv PP 0 uuuuu 110 ddddd") // +DEF_ENC(V6_vpackwuh_sat,ICLASS_CJ" 1 111 110 vvvvv PP 0 uuuuu 111 ddddd") // + +DEF_ENC(V6_vpackwh_sat, ICLASS_CJ" 1 111 111 vvvvv PP 0 uuuuu 000 ddddd") // +DEF_ENC(V6_vpackob, ICLASS_CJ" 1 111 111 vvvvv PP 0 uuuuu 001 ddddd") // +DEF_ENC(V6_vpackoh, ICLASS_CJ" 1 111 111 vvvvv PP 0 uuuuu 010 ddddd") // +DEF_ENC(V6_vrounduhub, ICLASS_CJ" 1 111 111 vvvvv PP 0 uuuuu 011 ddddd") // +DEF_ENC(V6_vrounduwuh, ICLASS_CJ" 1 111 111 vvvvv PP 0 uuuuu 100 ddddd") // +DEF_ENC(V6_vmpyewuh, ICLASS_CJ" 1 111 111 vvvvv PP 0 uuuuu 101 ddddd") +DEF_ENC(V6_vmpyowh, ICLASS_CJ" 1 111 111 vvvvv PP 0 uuuuu 111 ddddd") + + +#endif /* NO MMVEC */ diff --git a/target/hexagon/imported/mmvec/ext.idef b/target/hexagon/imported/mmvec/ext.idef new file mode 100644 index 0000000000..8ca5a606e1 --- /dev/null +++ b/target/hexagon/imported/mmvec/ext.idef @@ -0,0 +1,2606 @@ +/* + * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . 
+ */ + +/****************************************************************************** + * + * HOYA: MULTI MEDIA INSTRUCITONS + * + ******************************************************************************/ + +#ifndef EXTINSN +#define EXTINSN Q6INSN +#define __SELF_DEF_EXTINSN 1 +#endif + +#ifndef NO_MMVEC + +#define DO_FOR_EACH_CODE(WIDTH, CODE) \ +{ \ + fHIDE(int i;) \ + fVFOREACH(WIDTH, i) {\ + CODE ;\ + } \ +} + + + + +#define ITERATOR_INSN_ANY_SLOT(WIDTH,TAG,SYNTAX,DESCR,CODE) \ +EXTINSN(V6_##TAG, SYNTAX, ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VA), \ +DESCR, DO_FOR_EACH_CODE(WIDTH, CODE)) + + + +#define ITERATOR_INSN2_ANY_SLOT(WIDTH,TAG,SYNTAX,SYNTAX2,DESCR,CODE) \ +ITERATOR_INSN_ANY_SLOT(WIDTH,TAG,SYNTAX2,DESCR,CODE) + +#define ITERATOR_INSN_ANY_SLOT_DOUBLE_VEC(WIDTH,TAG,SYNTAX,DESCR,CODE) \ +EXTINSN(V6_##TAG, SYNTAX, ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VA_DV), \ +DESCR, DO_FOR_EACH_CODE(WIDTH, CODE)) + + +#define ITERATOR_INSN2_ANY_SLOT_DOUBLE_VEC(WIDTH,TAG,SYNTAX,SYNTAX2,DESCR,CODE) \ +ITERATOR_INSN_ANY_SLOT_DOUBLE_VEC(WIDTH,TAG,SYNTAX2,DESCR,CODE) + + +#define ITERATOR_INSN_SHIFT_SLOT(WIDTH,TAG,SYNTAX,DESCR,CODE) \ +EXTINSN(V6_##TAG, SYNTAX, ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VS), \ +DESCR, DO_FOR_EACH_CODE(WIDTH, CODE)) + + + +#define ITERATOR_INSN_SHIFT_SLOT_VV_LATE(WIDTH,TAG,SYNTAX,DESCR,CODE) \ +EXTINSN(V6_##TAG, SYNTAX, ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VS), \ +DESCR, DO_FOR_EACH_CODE(WIDTH, CODE)) + +#define ITERATOR_INSN2_SHIFT_SLOT(WIDTH,TAG,SYNTAX,SYNTAX2,DESCR,CODE) \ +ITERATOR_INSN_SHIFT_SLOT(WIDTH,TAG,SYNTAX2,DESCR,CODE) + +#define ITERATOR_INSN_PERMUTE_SLOT(WIDTH,TAG,SYNTAX,DESCR,CODE) \ +EXTINSN(V6_##TAG, SYNTAX, ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VP), \ +DESCR, DO_FOR_EACH_CODE(WIDTH, CODE)) + +#define ITERATOR_INSN2_PERMUTE_SLOT(WIDTH,TAG,SYNTAX,SYNTAX2,DESCR,CODE) \ +ITERATOR_INSN_PERMUTE_SLOT(WIDTH,TAG,SYNTAX2,DESCR,CODE) + +#define ITERATOR_INSN_PERMUTE_SLOT_DEP(WIDTH,TAG,SYNTAX,DESCR,CODE) \ +EXTINSN(V6_##TAG, SYNTAX, ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VP), + + +#define ITERATOR_INSN2_PERMUTE_SLOT_DEP(WIDTH,TAG,SYNTAX,SYNTAX2,DESCR,CODE) \ +ITERATOR_INSN_PERMUTE_SLOT_DEP(WIDTH,TAG,SYNTAX2,DESCR,CODE) + +#define ITERATOR_INSN_PERMUTE_SLOT_DOUBLE_VEC(WIDTH,TAG,SYNTAX,DESCR,CODE) \ +EXTINSN(V6_##TAG, SYNTAX, ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VP_VS), \ +DESCR, DO_FOR_EACH_CODE(WIDTH, CODE)) + +#define ITERATOR_INSN_PERMUTE_SLOT_DOUBLE_VEC_DEP(WIDTH,TAG,SYNTAX,DESCR,CODE) \ +EXTINSN(V6_##TAG, SYNTAX, ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VP_VS), \ +DESCR, DO_FOR_EACH_CODE(WIDTH, CODE)) + +#define ITERATOR_INSN2_PERMUTE_SLOT_DOUBLE_VEC(WIDTH,TAG,SYNTAX,SYNTAX2,DESCR,CODE) \ +ITERATOR_INSN_PERMUTE_SLOT_DOUBLE_VEC(WIDTH,TAG,SYNTAX2,DESCR,CODE) + +#define ITERATOR_INSN_MPY_SLOT(WIDTH,TAG, SYNTAX,DESCR,CODE) \ +EXTINSN(V6_##TAG, SYNTAX, \ +ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VX), \ +DESCR, DO_FOR_EACH_CODE(WIDTH, CODE)) + +#define ITERATOR_INSN_MPY_SLOT_LATE(WIDTH,TAG, SYNTAX,DESCR,CODE) \ +EXTINSN(V6_##TAG, SYNTAX, \ +ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VX), \ +DESCR, DO_FOR_EACH_CODE(WIDTH, CODE)) + +#define ITERATOR_INSN2_MPY_SLOT(WIDTH,TAG,SYNTAX,SYNTAX2,DESCR,CODE) \ +ITERATOR_INSN_MPY_SLOT(WIDTH,TAG,SYNTAX2,DESCR,CODE) + +#define ITERATOR_INSN2_MPY_SLOT_LATE(WIDTH,TAG, SYNTAX,SYNTAX2,DESCR,CODE) \ +ITERATOR_INSN_MPY_SLOT_LATE(WIDTH,TAG, SYNTAX2,DESCR,CODE) + + +#define ITERATOR_INSN_MPY_SLOT_DOUBLE_VEC(WIDTH,TAG,SYNTAX,DESCR,CODE) \ +EXTINSN(V6_##TAG, SYNTAX, ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VX_DV), \ +DESCR, DO_FOR_EACH_CODE(WIDTH, CODE)) + +#define 
ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(WIDTH,TAG,SYNTAX,SYNTAX2,DESCR,CODE) \ +ITERATOR_INSN_MPY_SLOT_DOUBLE_VEC(WIDTH,TAG,SYNTAX2,DESCR,CODE) + + + + +#define ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC2(WIDTH,TAG,SYNTAX,SYNTAX2,DESCR,CODE) \ +EXTINSN(V6_##TAG, SYNTAX, ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VX_DV,A_CVI_VX_VSRC0_IS_DST), DESCR, DO_FOR_EACH_CODE(WIDTH, CODE)) + +#define ITERATOR_INSN_SLOT2_DOUBLE_VEC(WIDTH,TAG,SYNTAX,DESCR,CODE) \ +EXTINSN(V6_##TAG, SYNTAX, ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VX_DV,A_RESTRICT_SLOT2ONLY), DESCR, DO_FOR_EACH_CODE(WIDTH, CODE)) + +#define ITERATOR_INSN_VHISTLIKE(WIDTH,TAG,SYNTAX,DESCR,CODE) \ +EXTINSN(V6_##TAG, SYNTAX, ATTRIBS(A_EXTENSION,A_CVI,A_CVI_4SLOT), \ +DESCR, fHIDE(mmvector_t input;) input = fTMPVDATA(); DO_FOR_EACH_CODE(WIDTH, CODE)) + + + + + +/****************************************************************************************** +* +* MMVECTOR MEMORY OPERATIONS - NO NAPALI V1 +* +*******************************************************************************************/ + + + +#define ITERATOR_INSN_MPY_SLOT_DOUBLE_VEC_NOV1(WIDTH,TAG,SYNTAX,DESCR,CODE) \ +EXTINSN(V6_##TAG, SYNTAX, ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VX_DV), \ +DESCR, DO_FOR_EACH_CODE(WIDTH, CODE)) + +#define ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC_NOV1(WIDTH,TAG,SYNTAX,SYNTAX2,DESCR,CODE) \ +ITERATOR_INSN_MPY_SLOT_DOUBLE_VEC_NOV1(WIDTH,TAG,SYNTAX2,DESCR,CODE) + + + +#define ITERATOR_INSN_SHIFT_SLOT_NOV1(WIDTH,TAG,SYNTAX,DESCR,CODE) \ +EXTINSN(V6_##TAG, SYNTAX, ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VS), \ +DESCR, DO_FOR_EACH_CODE(WIDTH, CODE)) + +#define ITERATOR_INSN2_SHIFT_SLOT_NOV1(WIDTH,TAG,SYNTAX,SYNTAX2,DESCR,CODE) \ +ITERATOR_INSN_SHIFT_SLOT_NOV1(WIDTH,TAG,SYNTAX2,DESCR,CODE) + + +#define ITERATOR_INSN_ANY_SLOT_NOV1(WIDTH,TAG,SYNTAX,DESCR,CODE) \ +EXTINSN(V6_##TAG, SYNTAX, ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VA), \ +DESCR, DO_FOR_EACH_CODE(WIDTH, CODE)) + +#define ITERATOR_INSN2_ANY_SLOT_NOV1(WIDTH,TAG,SYNTAX,SYNTAX2,DESCR,CODE) \ +ITERATOR_INSN_ANY_SLOT_NOV1(WIDTH,TAG,SYNTAX2,DESCR,CODE) + + +#define ITERATOR_INSN_MPY_SLOT_NOV1(WIDTH,TAG, SYNTAX,DESCR,CODE) \ +EXTINSN(V6_##TAG, SYNTAX, \ +ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VX), \ +DESCR, DO_FOR_EACH_CODE(WIDTH, CODE)) + +#define ITERATOR_INSN_PERMUTE_SLOT_NOV1(WIDTH,TAG,SYNTAX,DESCR,CODE) \ +EXTINSN(V6_##TAG, SYNTAX, ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VP), \ +DESCR, DO_FOR_EACH_CODE(WIDTH, CODE)) + +#define ITERATOR_INSN2_PERMUTE_SLOTT_NOV1(WIDTH,TAG,SYNTAX,SYNTAX2,DESCR,CODE) \ +ITERATOR_INSN_PERMUTE_SLOT(WIDTH,TAG,SYNTAX2,DESCR,CODE) + +#define ITERATOR_INSN_PERMUTE_SLOT_DEPT_NOV1(WIDTH,TAG,SYNTAX,DESCR,CODE) \ +EXTINSN(V6_##TAG, SYNTAX, ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VP), + + +#define ITERATOR_INSN2_PERMUTE_SLOT_DEPT_NOV1(WIDTH,TAG,SYNTAX,SYNTAX2,DESCR,CODE) \ +ITERATOR_INSN_PERMUTE_SLOT_DEP_NOV1(WIDTH,TAG,SYNTAX2,DESCR,CODE) + +#define ITERATOR_INSN_PERMUTE_SLOT_DOUBLE_VEC_NOV1(WIDTH,TAG,SYNTAX,DESCR,CODE) \ +EXTINSN(V6_##TAG, SYNTAX, ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VP_VS), \ +DESCR, DO_FOR_EACH_CODE(WIDTH, CODE)) + +#define ITERATOR_INSN_PERMUTE_SLOT_DOUBLE_VEC_DEPT_NOV1(WIDTH,TAG,SYNTAX,DESCR,CODE) \ +EXTINSN(V6_##TAG, SYNTAX, ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VP_VS), \ +DESCR, DO_FOR_EACH_CODE(WIDTH, CODE)) + +#define ITERATOR_INSN2_PERMUTE_SLOT_DOUBLE_VEC_NOV1(WIDTH,TAG,SYNTAX,SYNTAX2,DESCR,CODE) \ +ITERATOR_INSN_PERMUTE_SLOT_DOUBLE_VEC_NOV1(WIDTH,TAG,SYNTAX2,DESCR,CODE) + +#define NARROWING_SHIFT_NOV1(ITERSIZE,TAG,DSTM,DSTTYPE,SRCTYPE,SYNOPTS,SATFUNC,RNDFUNC,SHAMTMASK) \ +ITERATOR_INSN_SHIFT_SLOT_NOV1(ITERSIZE,TAG, \ +"Vd32." 
#DSTTYPE "=vasr(Vu32." #SRCTYPE ",Vv32." #SRCTYPE ",Rt8)" #SYNOPTS, \ +"Vector shift right and shuffle", \ + fHIDE(int )shamt = RtV & SHAMTMASK; \ + DSTM(0,VdV.SRCTYPE[i],SATFUNC(RNDFUNC(VvV.SRCTYPE[i],shamt) >> shamt)); \ + DSTM(1,VdV.SRCTYPE[i],SATFUNC(RNDFUNC(VuV.SRCTYPE[i],shamt) >> shamt))) + +#define MMVEC_AVGS_NOV1(TYPE,TYPE2,DESCR, WIDTH, DEST,SRC)\ +ITERATOR_INSN2_ANY_SLOT_NOV1(WIDTH,vavg##TYPE, "Vd32=vavg"TYPE2"(Vu32,Vv32)", "Vd32."#DEST"=vavg(Vu32."#SRC",Vv32."#SRC")", "Vector Average "DESCR, VdV.DEST[i] = fVAVGS( WIDTH, VuV.SRC[i], VvV.SRC[i])) \ +ITERATOR_INSN2_ANY_SLOT_NOV1(WIDTH,vavg##TYPE##rnd, "Vd32=vavg"TYPE2"(Vu32,Vv32):rnd", "Vd32."#DEST"=vavg(Vu32."#SRC",Vv32."#SRC"):rnd", "Vector Average % Round"DESCR, VdV.DEST[i] = fVAVGSRND( WIDTH, VuV.SRC[i], VvV.SRC[i])) \ +ITERATOR_INSN2_ANY_SLOT_NOV1(WIDTH,vnavg##TYPE, "Vd32=vnavg"TYPE2"(Vu32,Vv32)", "Vd32."#DEST"=vnavg(Vu32."#SRC",Vv32."#SRC")", "Vector Negative Average "DESCR, VdV.DEST[i] = fVNAVGS( WIDTH, VuV.SRC[i], VvV.SRC[i])) + + #define MMVEC_AVGU_NOV1(TYPE,TYPE2,DESCR, WIDTH, DEST,SRC)\ +ITERATOR_INSN2_ANY_SLOT_NOV1(WIDTH,vavg##TYPE, "Vd32=vavg"TYPE2"(Vu32,Vv32)", "Vd32."#DEST"=vavg(Vu32."#SRC",Vv32."#SRC")", "Vector Average "DESCR, VdV.DEST[i] = fVAVGU( WIDTH, VuV.SRC[i], VvV.SRC[i])) \ +ITERATOR_INSN2_ANY_SLOT_NOV1(WIDTH,vavg##TYPE##rnd, "Vd32=vavg"TYPE2"(Vu32,Vv32):rnd", "Vd32."#DEST"=vavg(Vu32."#SRC",Vv32."#SRC"):rnd", "Vector Average % Round"DESCR, VdV.DEST[i] = fVAVGURND(WIDTH, VuV.SRC[i], VvV.SRC[i])) + + + +/****************************************************************************************** +* +* MMVECTOR MEMORY OPERATIONS +* +*******************************************************************************************/ + +#define MMVEC_EACH_EA(TAG,DESCR,ATTRIB,NT,SYNTAXA,SYNTAXB,BEH) \ +EXTINSN(V6_##TAG##_pi, SYNTAXA "(Rx32++#s3)" NT SYNTAXB,ATTRIB,DESCR,{ fEA_REG(RxV); BEH; fPM_I(RxV,VEC_SCALE(siV)); }) \ +EXTINSN(V6_##TAG##_ai, SYNTAXA "(Rt32+#s4)" NT SYNTAXB,ATTRIB,DESCR,{ fEA_RI(RtV,VEC_SCALE(siV)); BEH;}) \ +EXTINSN(V6_##TAG##_ppu, SYNTAXA "(Rx32++Mu2)" NT SYNTAXB,ATTRIB,DESCR,{ fEA_REG(RxV); BEH; fPM_M(RxV,MuV); }) \ + + +#define MMVEC_COND_EACH_EA_TRUE(TAG,DESCR,ATTRIB,NT,SYNTAXA,SYNTAXB,SYNTAXP,BEH) \ +EXTINSN(V6_##TAG##_pred_pi, "if (" #SYNTAXP "4) " SYNTAXA "(Rx32++#s3)" NT SYNTAXB, ATTRIB,DESCR, { if (fLSBOLD(SYNTAXP##V)) { fEA_REG(RxV); BEH; fPM_I(RxV,siV*fVECSIZE()); } else {CANCEL;}}) \ +EXTINSN(V6_##TAG##_pred_ai, "if (" #SYNTAXP "4) " SYNTAXA "(Rt32+#s4)" NT SYNTAXB, ATTRIB,DESCR, { if (fLSBOLD(SYNTAXP##V)) { fEA_RI(RtV,siV*fVECSIZE()); BEH;} else {CANCEL;}}) \ +EXTINSN(V6_##TAG##_pred_ppu, "if (" #SYNTAXP "4) " SYNTAXA "(Rx32++Mu2)" NT SYNTAXB,ATTRIB,DESCR, { if (fLSBOLD(SYNTAXP##V)) { fEA_REG(RxV); BEH; fPM_M(RxV,MuV); } else {CANCEL;}}) \ + +#define MMVEC_COND_EACH_EA_FALSE(TAG,DESCR,ATTRIB,NT,SYNTAXA,SYNTAXB,SYNTAXP,BEH) \ +EXTINSN(V6_##TAG##_npred_pi, "if (!" #SYNTAXP "4) " SYNTAXA "(Rx32++#s3)" NT SYNTAXB,ATTRIB,DESCR,{ if (fLSBOLDNOT(SYNTAXP##V)) { fEA_REG(RxV); BEH; fPM_I(RxV,siV*fVECSIZE()); } else {CANCEL;}}) \ +EXTINSN(V6_##TAG##_npred_ai, "if (!" #SYNTAXP "4) " SYNTAXA "(Rt32+#s4)" NT SYNTAXB,ATTRIB,DESCR, { if (fLSBOLDNOT(SYNTAXP##V)) { fEA_RI(RtV,siV*fVECSIZE()); BEH;} else {CANCEL;}}) \ +EXTINSN(V6_##TAG##_npred_ppu, "if (!" 
#SYNTAXP "4) " SYNTAXA "(Rx32++Mu2)" NT SYNTAXB,ATTRIB,DESCR,{ if (fLSBOLDNOT(SYNTAXP##V)) { fEA_REG(RxV); BEH; fPM_M(RxV,MuV); } else {CANCEL;}}) + +#define MMVEC_COND_EACH_EA(TAG,DESCR,ATTRIB,NT,SYNTAXA,SYNTAXB,SYNTAXP,BEH) \ +MMVEC_COND_EACH_EA_TRUE(TAG,DESCR,ATTRIB,NT,SYNTAXA,SYNTAXB,SYNTAXP,BEH) \ +MMVEC_COND_EACH_EA_FALSE(TAG,DESCR,ATTRIB,NT,SYNTAXA,SYNTAXB,SYNTAXP,BEH) + + +#define VEC_SCALE(X) X*fVECSIZE() + + +#define MMVEC_LD(TAG,DESCR,ATTRIB,NT) MMVEC_EACH_EA(TAG,DESCR,ATTRIB,NT,"Vd32=vmem","",fLOADMMV(EA,VdV)) +#define MMVEC_LDC(TAG,DESCR,ATTRIB,NT) MMVEC_EACH_EA(TAG##_cur,DESCR,ATTRIB,NT,"Vd32.cur=vmem","",fLOADMMV(EA,VdV)) +#define MMVEC_LDT(TAG,DESCR,ATTRIB,NT) MMVEC_EACH_EA(TAG##_tmp,DESCR,ATTRIB,NT,"Vd32.tmp=vmem","",fLOADMMV(EA,VdV)) +#define MMVEC_LDU(TAG,DESCR,ATTRIB,NT) MMVEC_EACH_EA(TAG,DESCR,ATTRIB,NT,"Vd32=vmemu","",fLOADMMVU(EA,VdV)) + + +#define MMVEC_STQ(TAG,DESCR,ATTRIB,NT) \ +MMVEC_EACH_EA(TAG##_qpred,DESCR,ATTRIB,NT,"if (Qv4) vmem","=Vs32",fSTOREMMVQ(EA,VsV,QvV)) \ +MMVEC_EACH_EA(TAG##_nqpred,DESCR,ATTRIB,NT,"if (!Qv4) vmem","=Vs32",fSTOREMMVNQ(EA,VsV,QvV)) + +/**************************************************************** +* MAPPING FOR VMEMs +****************************************************************/ + +#define ATTR_VMEM A_EXTENSION,A_CVI,A_CVI_VM +#define ATTR_VMEMU A_EXTENSION,A_CVI,A_CVI_VM,A_CVI_VP + + +MMVEC_LD(vL32b, "Aligned Vector Load", ATTRIBS(ATTR_VMEM,A_LOAD,A_CVI_VA),) +MMVEC_LDC(vL32b, "Aligned Vector Load Cur", ATTRIBS(ATTR_VMEM,A_LOAD,A_CVI_NEW,A_CVI_VA),) +MMVEC_LDT(vL32b, "Aligned Vector Load Tmp", ATTRIBS(ATTR_VMEM,A_LOAD,A_CVI_TMP),) + +MMVEC_COND_EACH_EA(vL32b,"Conditional Aligned Vector Load",ATTRIBS(ATTR_VMEM,A_LOAD,A_CVI_VA),,"Vd32=vmem",,Pv,fLOADMMV(EA,VdV);) +MMVEC_COND_EACH_EA(vL32b_cur,"Conditional Aligned Vector Load Cur",ATTRIBS(ATTR_VMEM,A_LOAD,A_CVI_VA,A_CVI_NEW),,"Vd32.cur=vmem",,Pv,fLOADMMV(EA,VdV);) +MMVEC_COND_EACH_EA(vL32b_tmp,"Conditional Aligned Vector Load Tmp",ATTRIBS(ATTR_VMEM,A_LOAD,A_CVI_TMP),,"Vd32.tmp=vmem",,Pv,fLOADMMV(EA,VdV);) + +MMVEC_EACH_EA(vS32b,"Aligned Vector Store",ATTRIBS(ATTR_VMEM,A_STORE,A_RESTRICT_SLOT0ONLY,A_CVI_VA),,"vmem","=Vs32",fSTOREMMV(EA,VsV)) +MMVEC_COND_EACH_EA(vS32b,"Aligned Vector Store",ATTRIBS(ATTR_VMEM,A_STORE,A_RESTRICT_SLOT0ONLY,A_CVI_VA),,"vmem","=Vs32",Pv,fSTOREMMV(EA,VsV)) + + +MMVEC_STQ(vS32b, "Aligned Vector Store", ATTRIBS(ATTR_VMEM,A_STORE,A_RESTRICT_SLOT0ONLY,A_CVI_VA),) + +MMVEC_LDU(vL32Ub, "Unaligned Vector Load", ATTRIBS(ATTR_VMEMU,A_LOAD,A_RESTRICT_NOSLOT1),) + +MMVEC_EACH_EA(vS32Ub,"Unaligned Vector Store",ATTRIBS(ATTR_VMEMU,A_STORE,A_RESTRICT_NOSLOT1),,"vmemu","=Vs32",fSTOREMMVU(EA,VsV)) + +MMVEC_COND_EACH_EA(vS32Ub,"Unaligned Vector Store",ATTRIBS(ATTR_VMEMU,A_STORE,A_RESTRICT_NOSLOT1),,"vmemu","=Vs32",Pv,fSTOREMMVU(EA,VsV)) + +MMVEC_EACH_EA(vS32b_new,"Aligned Vector Store New",ATTRIBS(ATTR_VMEM,A_STORE,A_CVI_NEW,A_DOTNEWVALUE,A_RESTRICT_SLOT0ONLY),,"vmem","=Os8.new",fSTOREMMV(EA,fNEWVREG(OsN))) + +// V65 store relase, zero byte store +MMVEC_EACH_EA(vS32b_srls,"Aligned Vector Scatter Release",ATTRIBS(ATTR_VMEM,A_STORE,A_CVI_SCATTER_RELEASE,A_CVI_NEW,A_RESTRICT_SLOT0ONLY),,"vmem",":scatter_release",fSTORERELEASE(EA,0)) + + + +MMVEC_COND_EACH_EA(vS32b_new,"Aligned Vector Store New",ATTRIBS(ATTR_VMEM,A_STORE,A_CVI_NEW,A_DOTNEWVALUE,A_RESTRICT_SLOT0ONLY),,"vmem","=Os8.new",Pv,fSTOREMMV(EA,fNEWVREG(OsN))) + + +/****************************************************************************************** +* +* MMVECTOR MEMORY OPERATIONS - NON TEMPORAL +* 
+*******************************************************************************************/ + +#define ATTR_VMEM_NT A_EXTENSION,A_CVI,A_CVI_VM + +MMVEC_EACH_EA(vS32b_nt,"Aligned Vector Store - Non temporal",ATTRIBS(ATTR_VMEM_NT,A_STORE,A_RESTRICT_SLOT0ONLY,A_CVI_VA),":nt","vmem","=Vs32",fSTOREMMV(EA,VsV)) +MMVEC_COND_EACH_EA(vS32b_nt,"Aligned Vector Store - Non temporal",ATTRIBS(ATTR_VMEM_NT,A_STORE,A_RESTRICT_SLOT0ONLY,A_CVI_VA),":nt","vmem","=Vs32",Pv,fSTOREMMV(EA,VsV)) + +MMVEC_EACH_EA(vS32b_nt_new,"Aligned Vector Store New - Non temporal",ATTRIBS(ATTR_VMEM_NT,A_STORE,A_CVI_NEW,A_DOTNEWVALUE,A_RESTRICT_SLOT0ONLY),":nt","vmem","=Os8.new",fSTOREMMV(EA,fNEWVREG(OsN))) +MMVEC_COND_EACH_EA(vS32b_nt_new,"Aligned Vector Store New - Non temporal",ATTRIBS(ATTR_VMEM_NT,A_STORE,A_CVI_NEW,A_DOTNEWVALUE,A_RESTRICT_SLOT0ONLY),":nt","vmem","=Os8.new",Pv,fSTOREMMV(EA,fNEWVREG(OsN))) + + +MMVEC_STQ(vS32b_nt, "Aligned Vector Store - Non temporal", ATTRIBS(ATTR_VMEM_NT,A_STORE,A_RESTRICT_SLOT0ONLY,A_CVI_VA),":nt") + +MMVEC_LD(vL32b_nt, "Aligned Vector Load - Non temporal", ATTRIBS(ATTR_VMEM_NT,A_LOAD,A_CVI_VA),":nt") +MMVEC_LDC(vL32b_nt, "Aligned Vector Load Cur - Non temporal", ATTRIBS(ATTR_VMEM_NT,A_LOAD,A_CVI_NEW,A_CVI_VA),":nt") +MMVEC_LDT(vL32b_nt, "Aligned Vector Load Tmp - Non temporal", ATTRIBS(ATTR_VMEM_NT,A_LOAD,A_CVI_TMP),":nt") + +MMVEC_COND_EACH_EA(vL32b_nt,"Conditional Aligned Vector Load",ATTRIBS(ATTR_VMEM_NT,A_CVI_VA),,"Vd32=vmem",":nt",Pv,fLOADMMV(EA,VdV);) +MMVEC_COND_EACH_EA(vL32b_nt_cur,"Conditional Aligned Vector Load Cur",ATTRIBS(ATTR_VMEM_NT,A_CVI_VA,A_CVI_NEW),,"Vd32.cur=vmem",":nt",Pv,fLOADMMV(EA,VdV);) +MMVEC_COND_EACH_EA(vL32b_nt_tmp,"Conditional Aligned Vector Load Tmp",ATTRIBS(ATTR_VMEM_NT,A_CVI_TMP),,"Vd32.tmp=vmem",":nt",Pv,fLOADMMV(EA,VdV);) + + +#undef VEC_SCALE + + +/*************************************************** + * Vector Alignment + ************************************************/ + +#define VALIGNB(SHIFT) \ + fHIDE(int i;) \ + for(i = 0; i < fVBYTES(); i++) {\ + VdV.ub[i] = (i+SHIFT>=fVBYTES()) ? 
VuV.ub[i+SHIFT-fVBYTES()] : VvV.ub[i+SHIFT];\
+ }
+
+EXTINSN(V6_valignb, "Vd32=valign(Vu32,Vv32,Rt8)", ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VP),"Align Two vectors by Rt8 as control",
+{
+ unsigned shift = RtV & (fVBYTES()-1);
+ VALIGNB(shift)
+})
+EXTINSN(V6_vlalignb, "Vd32=vlalign(Vu32,Vv32,Rt8)", ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VP),"Align Two vectors by Rt8 as control",
+{
+ unsigned shift = fVBYTES() - (RtV & (fVBYTES()-1));
+ VALIGNB(shift)
+})
+EXTINSN(V6_valignbi, "Vd32=valign(Vu32,Vv32,#u3)", ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VP),"Align Two vectors by #u3 as control",
+{
+ VALIGNB(uiV)
+})
+EXTINSN(V6_vlalignbi,"Vd32=vlalign(Vu32,Vv32,#u3)", ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VP),"Align Two vectors by #u3 as control",
+{
+ unsigned shift = fVBYTES() - uiV;
+ VALIGNB(shift)
+})
+
+EXTINSN(V6_vror, "Vd32=vror(Vu32,Rt32)", ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VP),
+"Align Two vectors by Rt32 as control",
+{
+ fHIDE(int k;)
+ for (k=0;k<fVBYTES();k++) {
+  VdV.ub[k] = VuV.ub[(k+RtV)&(fVBYTES()-1)];
+ }
+})
+
+#define V_SHIFT(TYPE,DESC,SIZE,LOGSIZE,CASTTYPE) \
+ITERATOR_INSN2_SHIFT_SLOT(SIZE,vasr##TYPE, "Vd32=vasr" #TYPE "(Vu32,Rt32)","Vd32."#TYPE"=vasr(Vu32."#TYPE",Rt32)", "Vector arithmetic shift right " DESC, VdV.TYPE[i] = (VuV.TYPE[i] >> (RtV & (SIZE-1)))) \
+ITERATOR_INSN2_SHIFT_SLOT(SIZE,vasl##TYPE, "Vd32=vasl" #TYPE "(Vu32,Rt32)","Vd32."#TYPE"=vasl(Vu32."#TYPE",Rt32)", "Vector arithmetic shift left " DESC, VdV.TYPE[i] = (VuV.TYPE[i] << (RtV & (SIZE-1)))) \
+ITERATOR_INSN2_SHIFT_SLOT(SIZE,vlsr##TYPE, "Vd32=vlsr" #TYPE "(Vu32,Rt32)","Vd32.u"#TYPE"=vlsr(Vu32.u"#TYPE",Rt32)", "Vector logical shift right " DESC, VdV.u##TYPE[i] = (VuV.u##TYPE[i] >> (RtV & (SIZE-1)))) \
+ITERATOR_INSN2_SHIFT_SLOT(SIZE,vasr##TYPE##v,"Vd32=vasr" #TYPE "(Vu32,Vv32)","Vd32."#TYPE"=vasr(Vu32."#TYPE",Vv32."#TYPE")", "Vector arithmetic shift right " DESC, VdV.TYPE[i] = fBIDIR_ASHIFTR(VuV.TYPE[i], fSXTN((LOGSIZE+1),SIZE,VvV.TYPE[i]),CASTTYPE)) \
+ITERATOR_INSN2_SHIFT_SLOT(SIZE,vasl##TYPE##v,"Vd32=vasl" #TYPE "(Vu32,Vv32)","Vd32."#TYPE"=vasl(Vu32."#TYPE",Vv32."#TYPE")", "Vector arithmetic shift left " DESC, VdV.TYPE[i] = fBIDIR_ASHIFTL(VuV.TYPE[i], fSXTN((LOGSIZE+1),SIZE,VvV.TYPE[i]),CASTTYPE)) \
+ITERATOR_INSN2_SHIFT_SLOT(SIZE,vlsr##TYPE##v,"Vd32=vlsr" #TYPE "(Vu32,Vv32)","Vd32."#TYPE"=vlsr(Vu32."#TYPE",Vv32."#TYPE")", "Vector logical shift right " DESC, VdV.u##TYPE[i] = fBIDIR_LSHIFTR(VuV.u##TYPE[i], fSXTN((LOGSIZE+1),SIZE,VvV.TYPE[i]),CASTTYPE)) \
+
+V_SHIFT(w, "word", 32,5,4_4)
+V_SHIFT(h, "halfword", 16,4,2_2)
+
+ITERATOR_INSN_SHIFT_SLOT(8,vlsrb,"Vd32.ub=vlsr(Vu32.ub,Rt32)","vec log shift right bytes", VdV.b[i] = VuV.ub[i] >> (RtV & 0x7))
+
+ITERATOR_INSN2_SHIFT_SLOT(32,vrotr,"Vd32=vrotr(Vu32,Vv32)","Vd32.uw=vrotr(Vu32.uw,Vv32.uw)","Vector word rotate right", VdV.uw[i] = ((VuV.uw[i] >> (VvV.uw[i] & 0x1f)) | (VuV.uw[i] << (32 - (VvV.uw[i] & 0x1f)))))
+
+/*********************************************************************
+ * MMVECTOR SHIFT AND PERMUTE
+ * ******************************************************************/
+
+ITERATOR_INSN2_PERMUTE_SLOT_DOUBLE_VEC(32,vasr_into,"Vxx32=vasrinto(Vu32,Vv32)","Vxx32.w=vasrinto(Vu32.w,Vv32.w)","ASR vector 1 elements and overlay dropping bits to MSB of vector 2 elements",
+ fHIDE(int64_t ) shift = (fSE32_64(VuV.w[i]) << 32);
+ fHIDE(int64_t ) mask = (((fSE32_64(VxxV.v[0].w[i])) << 32) | fZE32_64(VxxV.v[0].w[i]));
+ fHIDE(int64_t) lomask = (((fSE32_64(1)) << 32) - 1);
+ fHIDE(int ) count = -(0x40 & VvV.w[i]) + (VvV.w[i] & 0x3f);
+ fHIDE(int64_t ) result = (count == -0x40) ? 0 : (((count < 0) ?
((shift << -(count)) | (mask & (lomask << -(count)))) : ((shift >> count) | (mask & (lomask >> count))))); + VxxV.v[1].w[i] = ((result >> 32) & 0xffffffff); + VxxV.v[0].w[i] = (result & 0xffffffff)) + +#define NEW_NARROWING_SHIFT 1 + +#if NEW_NARROWING_SHIFT +#define NARROWING_SHIFT(ITERSIZE,TAG,DSTM,DSTTYPE,SRCTYPE,SYNOPTS,SATFUNC,RNDFUNC,SHAMTMASK) \ +ITERATOR_INSN_SHIFT_SLOT(ITERSIZE,TAG, \ +"Vd32." #DSTTYPE "=vasr(Vu32." #SRCTYPE ",Vv32." #SRCTYPE ",Rt8)" #SYNOPTS, \ +"Vector shift right and shuffle", \ + fHIDE(int )shamt = RtV & SHAMTMASK; \ + DSTM(0,VdV.SRCTYPE[i],SATFUNC(RNDFUNC(VvV.SRCTYPE[i],shamt) >> shamt)); \ + DSTM(1,VdV.SRCTYPE[i],SATFUNC(RNDFUNC(VuV.SRCTYPE[i],shamt) >> shamt))) + + + + + +/* WORD TO HALF*/ + +NARROWING_SHIFT(32,vasrwh,fSETHALF,h,w,,fECHO,fVNOROUND,0xF) +NARROWING_SHIFT(32,vasrwhsat,fSETHALF,h,w,:sat,fVSATH,fVNOROUND,0xF) +NARROWING_SHIFT(32,vasrwhrndsat,fSETHALF,h,w,:rnd:sat,fVSATH,fVROUND,0xF) +NARROWING_SHIFT(32,vasrwuhrndsat,fSETHALF,uh,w,:rnd:sat,fVSATUH,fVROUND,0xF) +NARROWING_SHIFT(32,vasrwuhsat,fSETHALF,uh,w,:sat,fVSATUH,fVNOROUND,0xF) +NARROWING_SHIFT(32,vasruwuhrndsat,fSETHALF,uh,uw,:rnd:sat,fVSATUH,fVROUND,0xF) + +NARROWING_SHIFT_NOV1(32,vasruwuhsat,fSETHALF,uh,uw,:sat,fVSATUH,fVNOROUND,0xF) +NARROWING_SHIFT(16,vasrhubsat,fSETBYTE,ub,h,:sat,fVSATUB,fVNOROUND,0x7) +NARROWING_SHIFT(16,vasrhubrndsat,fSETBYTE,ub,h,:rnd:sat,fVSATUB,fVROUND,0x7) +NARROWING_SHIFT(16,vasrhbsat,fSETBYTE,b,h,:sat,fVSATB,fVNOROUND,0x7) +NARROWING_SHIFT(16,vasrhbrndsat,fSETBYTE,b,h,:rnd:sat,fVSATB,fVROUND,0x7) + +NARROWING_SHIFT_NOV1(16,vasruhubsat,fSETBYTE,ub,uh,:sat,fVSATUB,fVNOROUND,0x7) +NARROWING_SHIFT_NOV1(16,vasruhubrndsat,fSETBYTE,ub,uh,:rnd:sat,fVSATUB,fVROUND,0x7) + +#else +ITERATOR_INSN2_SHIFT_SLOT(32,vasrwh,"Vd32=vasrwh(Vu32,Vv32,Rt8)","Vd32.h=vasr(Vu32.w,Vv32.w,Rt8)", +"Vector arithmetic shift right words, shuffle even halfwords", + fSETHALF(0,VdV.w[i], (VvV.w[i] >> (RtV & 0xF))); + fSETHALF(1,VdV.w[i], (VuV.w[i] >> (RtV & 0xF)))) + + +ITERATOR_INSN2_SHIFT_SLOT(32,vasrwhsat,"Vd32=vasrwh(Vu32,Vv32,Rt8):sat","Vd32.h=vasr(Vu32.w,Vv32.w,Rt8):sat", +"Vector arithmetic shift right words, shuffle even halfwords", + fSETHALF(0,VdV.w[i], fVSATH(VvV.w[i] >> (RtV & 0xF))); + fSETHALF(1,VdV.w[i], fVSATH(VuV.w[i] >> (RtV & 0xF)))) + +ITERATOR_INSN2_SHIFT_SLOT(32,vasrwhrndsat,"Vd32=vasrwh(Vu32,Vv32,Rt8):rnd:sat","Vd32.h=vasr(Vu32.w,Vv32.w,Rt8):rnd:sat", +"Vector arithmetic shift right words, shuffle even halfwords", + fHIDE(int ) shamt = RtV & 0xF; + fSETHALF(0,VdV.w[i], fVSATH( (VvV.w[i] + fBIDIR_ASHIFTL(1,(shamt-1),4_8) ) >> shamt)); + fSETHALF(1,VdV.w[i], fVSATH( (VuV.w[i] + fBIDIR_ASHIFTL(1,(shamt-1),4_8) ) >> shamt))) + +ITERATOR_INSN2_SHIFT_SLOT(32,vasrwuhrndsat,"Vd32=vasrwuh(Vu32,Vv32,Rt8):rnd:sat","Vd32.uh=vasr(Vu32.w,Vv32.w,Rt8):rnd:sat", +"Vector arithmetic shift right words, shuffle even halfwords", + fHIDE(int ) shamt = RtV & 0xF; + fSETHALF(0,VdV.w[i], fVSATUH( (VvV.w[i] + fBIDIR_ASHIFTL(1,(shamt-1),4_8) ) >> shamt)); + fSETHALF(1,VdV.w[i], fVSATUH( (VuV.w[i] + fBIDIR_ASHIFTL(1,(shamt-1),4_8) ) >> shamt))) + +ITERATOR_INSN2_SHIFT_SLOT(32,vasrwuhsat,"Vd32=vasrwuh(Vu32,Vv32,Rt8):sat","Vd32.uh=vasr(Vu32.w,Vv32.w,Rt8):sat", +"Vector arithmetic shift right words, shuffle even halfwords", + fSETHALF(0, VdV.uw[i], fVSATUH(VvV.w[i] >> (RtV & 0xF))); + fSETHALF(1, VdV.uw[i], fVSATUH(VuV.w[i] >> (RtV & 0xF)))) + +ITERATOR_INSN2_SHIFT_SLOT(32,vasruwuhrndsat,"Vd32=vasruwuh(Vu32,Vv32,Rt8):rnd:sat","Vd32.uh=vasr(Vu32.uw,Vv32.uw,Rt8):rnd:sat", +"Vector arithmetic shift right 
words, shuffle even halfwords", + fHIDE(int ) shamt = RtV & 0xF; + fSETHALF(0,VdV.w[i], fVSATUH( (VvV.uw[i] + fBIDIR_ASHIFTL(1,(shamt-1),4_8) ) >> shamt)); + fSETHALF(1,VdV.w[i], fVSATUH( (VuV.uw[i] + fBIDIR_ASHIFTL(1,(shamt-1),4_8) ) >> shamt))) +#endif + + + +ITERATOR_INSN2_SHIFT_SLOT(32,vroundwh,"Vd32=vroundwh(Vu32,Vv32):sat","Vd32.h=vround(Vu32.w,Vv32.w):sat", +"Vector round words to halves, shuffle resultant halfwords", + fSETHALF(0, VdV.uw[i], fVSATH((VvV.w[i] + fCONSTLL(0x8000)) >> 16)); + fSETHALF(1, VdV.uw[i], fVSATH((VuV.w[i] + fCONSTLL(0x8000)) >> 16))) + +ITERATOR_INSN2_SHIFT_SLOT(32,vroundwuh,"Vd32=vroundwuh(Vu32,Vv32):sat","Vd32.uh=vround(Vu32.w,Vv32.w):sat", +"Vector round words to halves, shuffle resultant halfwords", + fSETHALF(0, VdV.uw[i], fVSATUH((VvV.w[i] + fCONSTLL(0x8000)) >> 16)); + fSETHALF(1, VdV.uw[i], fVSATUH((VuV.w[i] + fCONSTLL(0x8000)) >> 16))) + +ITERATOR_INSN2_SHIFT_SLOT(32,vrounduwuh,"Vd32=vrounduwuh(Vu32,Vv32):sat","Vd32.uh=vround(Vu32.uw,Vv32.uw):sat", +"Vector round words to halves, shuffle resultant halfwords", + fSETHALF(0, VdV.uw[i], fVSATUH((VvV.uw[i] + fCONSTLL(0x8000)) >> 16)); + fSETHALF(1, VdV.uw[i], fVSATUH((VuV.uw[i] + fCONSTLL(0x8000)) >> 16))) + + + + + +/* HALF TO BYTE*/ + +ITERATOR_INSN2_SHIFT_SLOT(16,vroundhb,"Vd32=vroundhb(Vu32,Vv32):sat","Vd32.b=vround(Vu32.h,Vv32.h):sat", +"Vector round words to halves, shuffle resultant halfwords", + fSETBYTE(0, VdV.uh[i], fVSATB((VvV.h[i] + 0x80) >> 8)); + fSETBYTE(1, VdV.uh[i], fVSATB((VuV.h[i] + 0x80) >> 8))) + +ITERATOR_INSN2_SHIFT_SLOT(16,vroundhub,"Vd32=vroundhub(Vu32,Vv32):sat","Vd32.ub=vround(Vu32.h,Vv32.h):sat", +"Vector round words to halves, shuffle resultant halfwords", + fSETBYTE(0, VdV.uh[i], fVSATUB((VvV.h[i] + 0x80) >> 8)); + fSETBYTE(1, VdV.uh[i], fVSATUB((VuV.h[i] + 0x80) >> 8))) + +ITERATOR_INSN2_SHIFT_SLOT(16,vrounduhub,"Vd32=vrounduhub(Vu32,Vv32):sat","Vd32.ub=vround(Vu32.uh,Vv32.uh):sat", +"Vector round words to halves, shuffle resultant halfwords", + fSETBYTE(0, VdV.uh[i], fVSATUB((VvV.uh[i] + 0x80) >> 8)); + fSETBYTE(1, VdV.uh[i], fVSATUB((VuV.uh[i] + 0x80) >> 8))) + + +ITERATOR_INSN2_SHIFT_SLOT(32,vaslw_acc,"Vx32+=vaslw(Vu32,Rt32)","Vx32.w+=vasl(Vu32.w,Rt32)", +"Vector shift add word", + VxV.w[i] += (VuV.w[i] << (RtV & (32-1)))) + +ITERATOR_INSN2_SHIFT_SLOT(32,vasrw_acc,"Vx32+=vasrw(Vu32,Rt32)","Vx32.w+=vasr(Vu32.w,Rt32)", +"Vector shift add word", + VxV.w[i] += (VuV.w[i] >> (RtV & (32-1)))) + +ITERATOR_INSN2_SHIFT_SLOT_NOV1(16,vaslh_acc,"Vx32+=vaslh(Vu32,Rt32)","Vx32.h+=vasl(Vu32.h,Rt32)", +"Vector shift add halfword", + VxV.h[i] += (VuV.h[i] << (RtV & (16-1)))) + +ITERATOR_INSN2_SHIFT_SLOT_NOV1(16,vasrh_acc,"Vx32+=vasrh(Vu32,Rt32)","Vx32.h+=vasr(Vu32.h,Rt32)", +"Vector shift add halfword", + VxV.h[i] += (VuV.h[i] >> (RtV & (16-1)))) + +/************************************************************************** +* +* MMVECTOR ELEMENT-WISE ARITHMETIC +* +**************************************************************************/ + +/************************************************************************** +* MACROS GO IN MACROS.DEF NOT HERE!!! +**************************************************************************/ + + +#define MMVEC_ABSDIFF(TYPE,TYPE2,DESCR, WIDTH, DEST,SRC)\ +ITERATOR_INSN2_MPY_SLOT(WIDTH, vabsdiff##TYPE, "Vd32=vabsdiff"TYPE2"(Vu32,Vv32)" ,"Vd32."#DEST"=vabsdiff(Vu32."#SRC",Vv32."#SRC")" , "Vector Absolute of Difference "DESCR, VdV.DEST[i] = (VuV.SRC[i] > VvV.SRC[i]) ? 
(VuV.SRC[i] - VvV.SRC[i]) : (VvV.SRC[i] - VuV.SRC[i])) + +#define MMVEC_ADDU_SAT(TYPE,TYPE2,DESCR, WIDTH, DEST,SRC)\ +ITERATOR_INSN2_ANY_SLOT(WIDTH, vadd##TYPE##sat, "Vd32=vadd"TYPE2"(Vu32,Vv32):sat" , "Vd32."#DEST"=vadd(Vu32."#SRC",Vv32."#SRC"):sat", "Vector Add & Saturate "DESCR, VdV.DEST[i] = fVUADDSAT(WIDTH, VuV.SRC[i], VvV.SRC[i]))\ +ITERATOR_INSN2_ANY_SLOT_DOUBLE_VEC(WIDTH, vadd##TYPE##sat_dv, "Vdd32=vadd"TYPE2"(Vuu32,Vvv32):sat", "Vdd32."#DEST"=vadd(Vuu32."#SRC",Vvv32."#SRC"):sat", "Double Vector Add & Saturate "DESCR, VddV.v[0].DEST[i] = fVUADDSAT(WIDTH, VuuV.v[0].SRC[i],VvvV.v[0].SRC[i]); VddV.v[1].DEST[i] = fVUADDSAT(WIDTH, VuuV.v[1].SRC[i],VvvV.v[1].SRC[i]))\ +ITERATOR_INSN2_ANY_SLOT(WIDTH, vsub##TYPE##sat, "Vd32=vsub"TYPE2"(Vu32,Vv32):sat", "Vd32."#DEST"=vsub(Vu32."#SRC",Vv32."#SRC"):sat", "Vector Add & Saturate "DESCR, VdV.DEST[i] = fVUSUBSAT(WIDTH, VuV.SRC[i], VvV.SRC[i]))\ +ITERATOR_INSN2_ANY_SLOT_DOUBLE_VEC(WIDTH, vsub##TYPE##sat_dv, "Vdd32=vsub"TYPE2"(Vuu32,Vvv32):sat", "Vdd32."#DEST"=vsub(Vuu32."#SRC",Vvv32."#SRC"):sat", "Double Vector Add & Saturate "DESCR, VddV.v[0].DEST[i] = fVUSUBSAT(WIDTH, VuuV.v[0].SRC[i],VvvV.v[0].SRC[i]); VddV.v[1].DEST[i] = fVUSUBSAT(WIDTH, VuuV.v[1].SRC[i],VvvV.v[1].SRC[i]))\ + +#define MMVEC_ADDS_SAT(TYPE,TYPE2,DESCR, WIDTH,DEST,SRC)\ +ITERATOR_INSN2_ANY_SLOT(WIDTH, vadd##TYPE##sat, "Vd32=vadd"TYPE2"(Vu32,Vv32):sat" , "Vd32."#DEST"=vadd(Vu32."#SRC",Vv32."#SRC"):sat", "Vector Add & Saturate "DESCR, VdV.DEST[i] = fVSADDSAT(WIDTH, VuV.SRC[i], VvV.SRC[i]))\ +ITERATOR_INSN2_ANY_SLOT_DOUBLE_VEC(WIDTH, vadd##TYPE##sat_dv, "Vdd32=vadd"TYPE2"(Vuu32,Vvv32):sat", "Vdd32."#DEST"=vadd(Vuu32."#SRC",Vvv32."#SRC"):sat", "Double Vector Add & Saturate "DESCR, VddV.v[0].DEST[i] = fVSADDSAT(WIDTH, VuuV.v[0].SRC[i], VvvV.v[0].SRC[i]); VddV.v[1].DEST[i] = fVSADDSAT(WIDTH, VuuV.v[1].SRC[i], VvvV.v[1].SRC[i]))\ +ITERATOR_INSN2_ANY_SLOT(WIDTH, vsub##TYPE##sat, "Vd32=vsub"TYPE2"(Vu32,Vv32):sat", "Vd32."#DEST"=vsub(Vu32."#SRC",Vv32."#SRC"):sat", "Vector Add & Saturate "DESCR, VdV.DEST[i] = fVSSUBSAT(WIDTH, VuV.SRC[i], VvV.SRC[i]))\ +ITERATOR_INSN2_ANY_SLOT_DOUBLE_VEC(WIDTH, vsub##TYPE##sat_dv, "Vdd32=vsub"TYPE2"(Vuu32,Vvv32):sat", "Vdd32."#DEST"=vsub(Vuu32."#SRC",Vvv32."#SRC"):sat", "Double Vector Add & Saturate "DESCR, VddV.v[0].DEST[i] = fVSSUBSAT(WIDTH, VuuV.v[0].SRC[i], VvvV.v[0].SRC[i]); VddV.v[1].DEST[i] = fVSSUBSAT(WIDTH, VuuV.v[1].SRC[i], VvvV.v[1].SRC[i]))\ + +#define MMVEC_AVGU(TYPE,TYPE2,DESCR, WIDTH, DEST,SRC)\ +ITERATOR_INSN2_ANY_SLOT(WIDTH,vavg##TYPE, "Vd32=vavg"TYPE2"(Vu32,Vv32)", "Vd32."#DEST"=vavg(Vu32."#SRC",Vv32."#SRC")", "Vector Average "DESCR, VdV.DEST[i] = fVAVGU( WIDTH, VuV.SRC[i], VvV.SRC[i])) \ +ITERATOR_INSN2_ANY_SLOT(WIDTH,vavg##TYPE##rnd, "Vd32=vavg"TYPE2"(Vu32,Vv32):rnd", "Vd32."#DEST"=vavg(Vu32."#SRC",Vv32."#SRC"):rnd", "Vector Average % Round"DESCR, VdV.DEST[i] = fVAVGURND(WIDTH, VuV.SRC[i], VvV.SRC[i])) + + + +#define MMVEC_AVGS(TYPE,TYPE2,DESCR, WIDTH, DEST,SRC)\ +ITERATOR_INSN2_ANY_SLOT(WIDTH,vavg##TYPE, "Vd32=vavg"TYPE2"(Vu32,Vv32)", "Vd32."#DEST"=vavg(Vu32."#SRC",Vv32."#SRC")", "Vector Average "DESCR, VdV.DEST[i] = fVAVGS( WIDTH, VuV.SRC[i], VvV.SRC[i])) \ +ITERATOR_INSN2_ANY_SLOT(WIDTH,vavg##TYPE##rnd, "Vd32=vavg"TYPE2"(Vu32,Vv32):rnd", "Vd32."#DEST"=vavg(Vu32."#SRC",Vv32."#SRC"):rnd", "Vector Average % Round"DESCR, VdV.DEST[i] = fVAVGSRND( WIDTH, VuV.SRC[i], VvV.SRC[i])) \ +ITERATOR_INSN2_ANY_SLOT(WIDTH,vnavg##TYPE, "Vd32=vnavg"TYPE2"(Vu32,Vv32)", "Vd32."#DEST"=vnavg(Vu32."#SRC",Vv32."#SRC")", "Vector Negative Average "DESCR, 
VdV.DEST[i] = fVNAVGS( WIDTH, VuV.SRC[i], VvV.SRC[i])) + + + + + + + +#define MMVEC_ADDWRAP(TYPE,TYPE2, DESCR, WIDTH , DEST,SRC)\ +ITERATOR_INSN2_ANY_SLOT(WIDTH, vadd##TYPE, "Vd32=vadd"TYPE2"(Vu32,Vv32)" , "Vd32."#DEST"=vadd(Vu32."#SRC",Vv32."#SRC")", "Vector Add "DESCR, VdV.DEST[i] = VuV.SRC[i] + VvV.SRC[i])\ +ITERATOR_INSN2_ANY_SLOT(WIDTH, vsub##TYPE, "Vd32=vsub"TYPE2"(Vu32,Vv32)" , "Vd32."#DEST"=vsub(Vu32."#SRC",Vv32."#SRC")", "Vector Sub "DESCR, VdV.DEST[i] = VuV.SRC[i] - VvV.SRC[i])\ +ITERATOR_INSN2_ANY_SLOT_DOUBLE_VEC(WIDTH, vadd##TYPE##_dv, "Vdd32=vadd"TYPE2"(Vuu32,Vvv32)" , "Vdd32."#DEST"=vadd(Vuu32."#SRC",Vvv32."#SRC")", "Double Vector Add "DESCR, VddV.v[0].DEST[i] = VuuV.v[0].SRC[i] + VvvV.v[0].SRC[i]; VddV.v[1].DEST[i] = VuuV.v[1].SRC[i] + VvvV.v[1].SRC[i])\ +ITERATOR_INSN2_ANY_SLOT_DOUBLE_VEC(WIDTH, vsub##TYPE##_dv, "Vdd32=vsub"TYPE2"(Vuu32,Vvv32)" , "Vdd32."#DEST"=vsub(Vuu32."#SRC",Vvv32."#SRC")", "Double Vector Sub "DESCR, VddV.v[0].DEST[i] = VuuV.v[0].SRC[i] - VvvV.v[0].SRC[i]; VddV.v[1].DEST[i] = VuuV.v[1].SRC[i] - VvvV.v[1].SRC[i]) \ + + + + + +/* Wrapping Adds */ +MMVEC_ADDWRAP(b, "b", "Byte", 8, b, b) +MMVEC_ADDWRAP(h, "h", "Halfword", 16, h, h) +MMVEC_ADDWRAP(w, "w", "Word", 32, w, w) + +/* Saturating Adds */ +MMVEC_ADDU_SAT(ub, "ub", "Unsigned Byte", 8, ub, ub) +MMVEC_ADDU_SAT(uh, "uh", "Unsigned Halfword", 16, uh, uh) +MMVEC_ADDU_SAT(uw, "uw", "Unsigned word", 32, uw, uw) +MMVEC_ADDS_SAT(b, "b", "byte", 8, b, b) +MMVEC_ADDS_SAT(h, "h", "Halfword", 16, h, h) +MMVEC_ADDS_SAT(w, "w", "Word", 32, w, w) + + +/* Averaging Instructions */ +MMVEC_AVGU(ub,"ub", "Unsigned Byte", 8, ub, ub) +MMVEC_AVGU(uh,"uh", "Unsigned Halfword", 16, uh, uh) +MMVEC_AVGU_NOV1(uw,"uw", "Unsigned Word", 32, uw, uw) +MMVEC_AVGS_NOV1(b, "b", "Byte", 8, b, b) +MMVEC_AVGS(h, "h", "Halfword", 16, h, h) +MMVEC_AVGS(w, "w", "Word", 32, w, w) + + +/* Absolute Difference */ +MMVEC_ABSDIFF(ub,"ub", "Unsigned Byte", 8, ub, ub) +MMVEC_ABSDIFF(uh,"uh", "Unsigned Halfword", 16, uh, uh) +MMVEC_ABSDIFF(h,"h", "Halfword", 16, uh, h) +MMVEC_ABSDIFF(w,"w", "Word", 32, uw, w) + +ITERATOR_INSN2_ANY_SLOT(8,vnavgub, "Vd32=vnavgub(Vu32,Vv32)", "Vd32.b=vnavg(Vu32.ub,Vv32.ub)", +"Vector Negative Average Unsigned Byte", VdV.b[i] = fVNAVGU(8, VuV.ub[i], VvV.ub[i])) + +ITERATOR_INSN_ANY_SLOT(32,vaddcarrysat,"Vd32.w=vadd(Vu32.w,Vv32.w,Qs4):carry:sat","add w/carry and saturate", +VdV.w[i] = fVSATW(VuV.w[i]+VvV.w[i]+fGETQBIT(QsV,i*4))) + +ITERATOR_INSN_ANY_SLOT(32,vaddcarry,"Vd32.w=vadd(Vu32.w,Vv32.w,Qx4):carry","add w/carry", +VdV.w[i] = VuV.w[i]+VvV.w[i]+fGETQBIT(QxV,i*4); +fSETQBITS(QxV,4,0xF,4*i,-fCARRY_FROM_ADD32(VuV.w[i],VvV.w[i],fGETQBIT(QxV,i*4)))) + +ITERATOR_INSN_ANY_SLOT(32,vsubcarry,"Vd32.w=vsub(Vu32.w,Vv32.w,Qx4):carry","add w/carry", +VdV.w[i] = VuV.w[i]+~VvV.w[i]+fGETQBIT(QxV,i*4); +fSETQBITS(QxV,4,0xF,4*i,-fCARRY_FROM_ADD32(VuV.w[i],~VvV.w[i],fGETQBIT(QxV,i*4)))) + +ITERATOR_INSN_ANY_SLOT(32,vaddcarryo,"Vd32.w,Qe4=vadd(Vu32.w,Vv32.w):carry","add w/carry out-only", +VdV.w[i] = VuV.w[i]+VvV.w[i]; +fSETQBITS(QeV,4,0xF,4*i,-fCARRY_FROM_ADD32(VuV.w[i],VvV.w[i],0))) + +ITERATOR_INSN_ANY_SLOT(32,vsubcarryo,"Vd32.w,Qe4=vsub(Vu32.w,Vv32.w):carry","subtract w/carry out-only", +VdV.w[i] = VuV.w[i]+~VvV.w[i]+1; +fSETQBITS(QeV,4,0xF,4*i,-fCARRY_FROM_ADD32(VuV.w[i],~VvV.w[i],1))) + + +ITERATOR_INSN_ANY_SLOT(32,vsatdw,"Vd32.w=vsatdw(Vu32.w,Vv32.w)","Saturate from 64-bits (higher 32-bits come from first vector) to 32-bits",VdV.w[i] = fVSATDW(VuV.w[i],VvV.w[i])) + + +#define MMVEC_ADDSAT_MIX(TAGEND,SATF,WIDTH,DEST,SRC1,SRC2)\ 
+ITERATOR_INSN_ANY_SLOT(WIDTH, vadd##TAGEND,"Vd32."#DEST"=vadd(Vu32."#SRC1",Vv32."#SRC2"):sat", "Vector Add mixed", VdV.DEST[i] = SATF(VuV.SRC1[i] + VvV.SRC2[i]))\ +ITERATOR_INSN_ANY_SLOT(WIDTH, vsub##TAGEND,"Vd32."#DEST"=vsub(Vu32."#SRC1",Vv32."#SRC2"):sat", "Vector Sub mixed", VdV.DEST[i] = SATF(VuV.SRC1[i] - VvV.SRC2[i]))\ + +MMVEC_ADDSAT_MIX(ububb_sat,fVSATUB,8,ub,ub,b) + +/**************************** +* WIDENING +****************************/ + + + + +ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(16,vaddubh,"Vdd32=vaddub(Vu32,Vv32)","Vdd32.h=vadd(Vu32.ub,Vv32.ub)", +"Vector addition with widen into two vectors", + VddV.v[0].h[i] = fZE8_16(fGETUBYTE(0, VuV.uh[i])) + fZE8_16(fGETUBYTE(0, VvV.uh[i])); + VddV.v[1].h[i] = fZE8_16(fGETUBYTE(1, VuV.uh[i])) + fZE8_16(fGETUBYTE(1, VvV.uh[i]))) + +ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(16,vsububh,"Vdd32=vsubub(Vu32,Vv32)","Vdd32.h=vsub(Vu32.ub,Vv32.ub)", +"Vector subtraction with widen into two vectors", + VddV.v[0].h[i] = fZE8_16(fGETUBYTE(0, VuV.uh[i])) - fZE8_16(fGETUBYTE(0, VvV.uh[i])); + VddV.v[1].h[i] = fZE8_16(fGETUBYTE(1, VuV.uh[i])) - fZE8_16(fGETUBYTE(1, VvV.uh[i]))) + + + +ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(32,vaddhw,"Vdd32=vaddh(Vu32,Vv32)","Vdd32.w=vadd(Vu32.h,Vv32.h)", +"Vector addition with widen into two vectors", + VddV.v[0].w[i] = fGETHALF(0, VuV.w[i]) + fGETHALF(0, VvV.w[i]); + VddV.v[1].w[i] = fGETHALF(1, VuV.w[i]) + fGETHALF(1, VvV.w[i])) + +ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(32,vsubhw,"Vdd32=vsubh(Vu32,Vv32)","Vdd32.w=vsub(Vu32.h,Vv32.h)", +"Vector subtraction with widen into two vectors", + VddV.v[0].w[i] = fGETHALF(0, VuV.w[i]) - fGETHALF(0, VvV.w[i]); + VddV.v[1].w[i] = fGETHALF(1, VuV.w[i]) - fGETHALF(1, VvV.w[i])) + + +ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(32,vadduhw,"Vdd32=vadduh(Vu32,Vv32)","Vdd32.w=vadd(Vu32.uh,Vv32.uh)", +"Vector addition with widen into two vectors", + VddV.v[0].w[i] = fZE16_32(fGETUHALF(0, VuV.uw[i])) + fZE16_32(fGETUHALF(0, VvV.uw[i])); + VddV.v[1].w[i] = fZE16_32(fGETUHALF(1, VuV.uw[i])) + fZE16_32(fGETUHALF(1, VvV.uw[i]))) + +ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(32,vsubuhw,"Vdd32=vsubuh(Vu32,Vv32)","Vdd32.w=vsub(Vu32.uh,Vv32.uh)", +"Vector subtraction with widen into two vectors", + VddV.v[0].w[i] = fZE16_32(fGETUHALF(0, VuV.uw[i])) - fZE16_32(fGETUHALF(0, VvV.uw[i])); + VddV.v[1].w[i] = fZE16_32(fGETUHALF(1, VuV.uw[i])) - fZE16_32(fGETUHALF(1, VvV.uw[i]))) + + + +ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(32,vaddhw_acc,"Vxx32+=vaddh(Vu32,Vv32)","Vxx32.w+=vadd(Vu32.h,Vv32.h)", +"Vector addition with widen into two vectors", + VxxV.v[0].w[i] += fGETHALF(0, VuV.w[i]) + fGETHALF(0, VvV.w[i]); + VxxV.v[1].w[i] += fGETHALF(1, VuV.w[i]) + fGETHALF(1, VvV.w[i])) + +ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(32,vadduhw_acc,"Vxx32+=vadduh(Vu32,Vv32)","Vxx32.w+=vadd(Vu32.uh,Vv32.uh)", +"Vector addition with widen into two vectors", + VxxV.v[0].w[i] += fGETUHALF(0, VuV.w[i]) + fGETUHALF(0, VvV.w[i]); + VxxV.v[1].w[i] += fGETUHALF(1, VuV.w[i]) + fGETUHALF(1, VvV.w[i])) + +ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(16,vaddubh_acc,"Vxx32+=vaddub(Vu32,Vv32)","Vxx32.h+=vadd(Vu32.ub,Vv32.ub)", +"Vector addition with widen into two vectors", + VxxV.v[0].h[i] += fGETUBYTE(0, VuV.h[i]) + fGETUBYTE(0, VvV.h[i]); + VxxV.v[1].h[i] += fGETUBYTE(1, VuV.h[i]) + fGETUBYTE(1, VvV.h[i])) + + +/**************************** +* Conditional +****************************/ + +#define CONDADDSUB(WIDTH,TAGEND,LHSYN,RHSYN,DESCR,LHBEH,RHBEH) \ +ITERATOR_INSN2_ANY_SLOT(WIDTH,vadd##TAGEND##q,"if (Qv4."#TAGEND") "LHSYN"+="RHSYN,"if (Qv4) 
"LHSYN"+="RHSYN,DESCR,LHBEH=fCONDMASK##WIDTH(QvV,i,LHBEH+RHBEH,LHBEH)) \ +ITERATOR_INSN2_ANY_SLOT(WIDTH,vsub##TAGEND##q,"if (Qv4."#TAGEND") "LHSYN"-="RHSYN,"if (Qv4) "LHSYN"-="RHSYN,DESCR,LHBEH=fCONDMASK##WIDTH(QvV,i,LHBEH-RHBEH,LHBEH)) \ +ITERATOR_INSN2_ANY_SLOT(WIDTH,vadd##TAGEND##nq,"if (!Qv4."#TAGEND") "LHSYN"+="RHSYN,"if (!Qv4) "LHSYN"+="RHSYN,DESCR,LHBEH=fCONDMASK##WIDTH(QvV,i,LHBEH,LHBEH+RHBEH)) \ +ITERATOR_INSN2_ANY_SLOT(WIDTH,vsub##TAGEND##nq,"if (!Qv4."#TAGEND") "LHSYN"-="RHSYN,"if (!Qv4) "LHSYN"-="RHSYN,DESCR,LHBEH=fCONDMASK##WIDTH(QvV,i,LHBEH,LHBEH-RHBEH)) \ + +CONDADDSUB(8,b,"Vx32.b","Vu32.b","Conditional add/sub Byte",VxV.ub[i],VuV.ub[i]) +CONDADDSUB(16,h,"Vx32.h","Vu32.h","Conditional add/sub Half",VxV.h[i],VuV.h[i]) +CONDADDSUB(32,w,"Vx32.w","Vu32.w","Conditional add/sub Word",VxV.w[i],VuV.w[i]) + +/***************************************************** + ABSOLUTE VALUES +*****************************************************/ +// V65 +ITERATOR_INSN2_ANY_SLOT_NOV1(8,vabsb, "Vd32=vabsb(Vu32)", "Vd32.b=vabs(Vu32.b)", "Vector absolute value of bytes", VdV.b[i] = fABS(VuV.b[i])) +ITERATOR_INSN2_ANY_SLOT_NOV1(8,vabsb_sat, "Vd32=vabsb(Vu32):sat", "Vd32.b=vabs(Vu32.b):sat", "Vector absolute value of bytes", VdV.b[i] = fVSATB(fABS(fSE8_16(VuV.b[i])))) + + +ITERATOR_INSN2_ANY_SLOT(16,vabsh, "Vd32=vabsh(Vu32)", "Vd32.h=vabs(Vu32.h)", "Vector absolute value of halfwords", VdV.h[i] = fABS(VuV.h[i])) +ITERATOR_INSN2_ANY_SLOT(16,vabsh_sat, "Vd32=vabsh(Vu32):sat", "Vd32.h=vabs(Vu32.h):sat", "Vector absolute value of halfwords", VdV.h[i] = fVSATH(fABS(fSE16_32(VuV.h[i])))) +ITERATOR_INSN2_ANY_SLOT(32,vabsw, "Vd32=vabsw(Vu32)", "Vd32.w=vabs(Vu32.w)", "Vector absolute value of words", VdV.w[i] = fABS(VuV.w[i])) +ITERATOR_INSN2_ANY_SLOT(32,vabsw_sat, "Vd32=vabsw(Vu32):sat", "Vd32.w=vabs(Vu32.w):sat", "Vector absolute value of words", VdV.w[i] = fVSATW(fABS(fSE32_64(VuV.w[i])))) + + +/************************************************************************** + * MMVECTOR MULTIPLICATIONS + * ************************************************************************/ + + +/* Byte by Byte */ +ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(16,vmpybv,"Vdd32=vmpyb(Vu32,Vv32)","Vdd32.h=vmpy(Vu32.b,Vv32.b)", +"Vector absolute value of words", + VddV.v[0].h[i] = fMPY8SS(fGETBYTE(0, VuV.h[i]), fGETBYTE(0, VvV.h[i])); + VddV.v[1].h[i] = fMPY8SS(fGETBYTE(1, VuV.h[i]), fGETBYTE(1, VvV.h[i]))) + +ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(16,vmpybv_acc,"Vxx32+=vmpyb(Vu32,Vv32)","Vxx32.h+=vmpy(Vu32.b,Vv32.b)", +"Vector absolute value of words", + VxxV.v[0].h[i] += fMPY8SS(fGETBYTE(0, VuV.h[i]), fGETBYTE(0, VvV.h[i])); + VxxV.v[1].h[i] += fMPY8SS(fGETBYTE(1, VuV.h[i]), fGETBYTE(1, VvV.h[i]))) + + +ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(16,vmpyubv,"Vdd32=vmpyub(Vu32,Vv32)","Vdd32.uh=vmpy(Vu32.ub,Vv32.ub)", +"Vector absolute value of words", + VddV.v[0].uh[i] = fMPY8UU(fGETUBYTE(0, VuV.uh[i]), fGETUBYTE(0, VvV.uh[i]) ); + VddV.v[1].uh[i] = fMPY8UU(fGETUBYTE(1, VuV.uh[i]), fGETUBYTE(1, VvV.uh[i]) )) + +ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(16,vmpyubv_acc,"Vxx32+=vmpyub(Vu32,Vv32)","Vxx32.uh+=vmpy(Vu32.ub,Vv32.ub)", +"Vector absolute value of words", + VxxV.v[0].uh[i] += fMPY8UU(fGETUBYTE(0, VuV.uh[i]), fGETUBYTE(0, VvV.uh[i]) ); + VxxV.v[1].uh[i] += fMPY8UU(fGETUBYTE(1, VuV.uh[i]), fGETUBYTE(1, VvV.uh[i]) )) + + + + + + +ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(16,vmpybusv,"Vdd32=vmpybus(Vu32,Vv32)","Vdd32.h=vmpy(Vu32.ub,Vv32.b)", +"Vector absolute value of words", + VddV.v[0].h[i] = fMPY8US(fGETUBYTE(0, VuV.uh[i]), fGETBYTE(0, VvV.h[i])); + 
VddV.v[1].h[i] = fMPY8US(fGETUBYTE(1, VuV.uh[i]), fGETBYTE(1, VvV.h[i]))) + +ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(16,vmpybusv_acc,"Vxx32+=vmpybus(Vu32,Vv32)","Vxx32.h+=vmpy(Vu32.ub,Vv32.b)", +"Vector absolute value of words", + VxxV.v[0].h[i] += fMPY8US(fGETUBYTE(0, VuV.uh[i]), fGETBYTE(0, VvV.h[i])); + VxxV.v[1].h[i] += fMPY8US(fGETUBYTE(1, VuV.uh[i]), fGETBYTE(1, VvV.h[i]))) + + + + +ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(16,vmpabusv,"Vdd32=vmpabus(Vuu32,Vvv32)","Vdd32.h=vmpa(Vuu32.ub,Vvv32.b)", +"Vertical Byte Multiply", + VddV.v[0].h[i] = fMPY8US(fGETUBYTE(0, VuuV.v[0].uh[i]), fGETBYTE(0, VvvV.v[0].uh[i])) + fMPY8US(fGETUBYTE(0, VuuV.v[1].uh[i]), fGETBYTE(0, VvvV.v[1].uh[i])); + VddV.v[1].h[i] = fMPY8US(fGETUBYTE(1, VuuV.v[0].uh[i]), fGETBYTE(1, VvvV.v[0].uh[i])) + fMPY8US(fGETUBYTE(1, VuuV.v[1].uh[i]), fGETBYTE(1, VvvV.v[1].uh[i]))) + +ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(16,vmpabuuv,"Vdd32=vmpabuu(Vuu32,Vvv32)","Vdd32.h=vmpa(Vuu32.ub,Vvv32.ub)", +"Vertical Byte Multiply", + VddV.v[0].h[i] = fMPY8UU(fGETUBYTE(0, VuuV.v[0].uh[i]), fGETUBYTE(0, VvvV.v[0].uh[i])) + fMPY8UU(fGETUBYTE(0, VuuV.v[1].uh[i]), fGETUBYTE(0, VvvV.v[1].uh[i])); + VddV.v[1].h[i] = fMPY8UU(fGETUBYTE(1, VuuV.v[0].uh[i]), fGETUBYTE(1, VvvV.v[0].uh[i])) + fMPY8UU(fGETUBYTE(1, VuuV.v[1].uh[i]), fGETUBYTE(1, VvvV.v[1].uh[i]))) + + + + + + + +ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(32,vmpyhv,"Vdd32=vmpyh(Vu32,Vv32)","Vdd32.w=vmpy(Vu32.h,Vv32.h)", +"Vector by Vector Halfword Multiply", + VddV.v[0].w[i] = fMPY16SS(fGETHALF(0, VuV.w[i]), fGETHALF(0, VvV.w[i])); + VddV.v[1].w[i] = fMPY16SS(fGETHALF(1, VuV.w[i]), fGETHALF(1, VvV.w[i]))) + +ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(32,vmpyhv_acc,"Vxx32+=vmpyh(Vu32,Vv32)","Vxx32.w+=vmpy(Vu32.h,Vv32.h)", +"Vector by Vector Halfword Multiply", + VxxV.v[0].w[i] += fMPY16SS(fGETHALF(0, VuV.w[i]), fGETHALF(0, VvV.w[i])); + VxxV.v[1].w[i] += fMPY16SS(fGETHALF(1, VuV.w[i]), fGETHALF(1, VvV.w[i]))) + +ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(32,vmpyuhv,"Vdd32=vmpyuh(Vu32,Vv32)","Vdd32.uw=vmpy(Vu32.uh,Vv32.uh)", +"Vector by Vector Unsigned Halfword Multiply", + VddV.v[0].uw[i] = fMPY16UU(fGETUHALF(0, VuV.uw[i]), fGETUHALF(0, VvV.uw[i])); + VddV.v[1].uw[i] = fMPY16UU(fGETUHALF(1, VuV.uw[i]), fGETUHALF(1, VvV.uw[i]))) + +ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(32,vmpyuhv_acc,"Vxx32+=vmpyuh(Vu32,Vv32)","Vxx32.uw+=vmpy(Vu32.uh,Vv32.uh)", +"Vector by Vector Unsigned Halfword Multiply", + VxxV.v[0].uw[i] += fMPY16UU(fGETUHALF(0, VuV.uw[i]), fGETUHALF(0, VvV.uw[i])); + VxxV.v[1].uw[i] += fMPY16UU(fGETUHALF(1, VuV.uw[i]), fGETUHALF(1, VvV.uw[i]))) + + + +/* Vector by Vector */ +ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(16,vmpyhvsrs,"Vd32=vmpyh(Vu32,Vv32):<<1:rnd:sat","Vd32.h=vmpy(Vu32.h,Vv32.h):<<1:rnd:sat", +"Vector halfword multiply with round, shift, and sat16", + VdV.h[i] = fVSATH(fGETHALF(1,fVSAT(fROUND((fMPY16SS(VuV.h[i],VvV.h[i] )<<1)))))) + + + + + +ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(32,vmpyhus, "Vdd32=vmpyhus(Vu32,Vv32)","Vdd32.w=vmpy(Vu32.h,Vv32.uh)", +"Vector by Vector Halfword Multiply", + VddV.v[0].w[i] = fMPY16SU(fGETHALF(0, VuV.w[i]), fGETUHALF(0, VvV.uw[i])); + VddV.v[1].w[i] = fMPY16SU(fGETHALF(1, VuV.w[i]), fGETUHALF(1, VvV.uw[i]))) + + +ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(32,vmpyhus_acc, "Vxx32+=vmpyhus(Vu32,Vv32)","Vxx32.w+=vmpy(Vu32.h,Vv32.uh)", +"Vector by Vector Halfword Multiply", + VxxV.v[0].w[i] += fMPY16SU(fGETHALF(0, VuV.w[i]), fGETUHALF(0, VvV.uw[i])); + VxxV.v[1].w[i] += fMPY16SU(fGETHALF(1, VuV.w[i]), fGETUHALF(1, VvV.uw[i]))) + + + + 
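The widening multiplies above all split each lane the same way: sub-element 0 of a lane (byte 0 or halfword 0) produces the result written to the low vector of the destination pair, and sub-element 1 produces the result written to the high vector. A minimal scalar sketch of that pattern for Vdd32.h=vmpy(Vu32.ub,Vv32.b) (vmpybusv), assuming a 128-byte HVX vector; the hvx_* types and vmpybusv_model name are illustrative only and not part of the imported file:

/* Hypothetical reference model, not part of the imported idef. */
#include <stdint.h>

#define VBYTES  128            /* assumes the 128-byte HVX vector length */
#define VHALVES (VBYTES / 2)   /* halfword lanes per vector */

typedef struct { uint8_t ub[VBYTES]; }  hvx_vec_t;
typedef struct { int16_t h[VHALVES]; }  hvx_vec_h_t;

/* Vdd32.h=vmpy(Vu32.ub,Vv32.b): the even bytes of each halfword lane go to
 * the low vector of the pair and the odd bytes to the high vector, matching
 * the fGETUBYTE(0,...)/fGETUBYTE(1,...) selects in the definitions above. */
static void vmpybusv_model(const hvx_vec_t *u, const hvx_vec_t *v,
                           hvx_vec_h_t dd[2])
{
    for (int i = 0; i < VHALVES; i++) {
        uint8_t u_even = u->ub[2 * i];
        uint8_t u_odd  = u->ub[2 * i + 1];
        int8_t  v_even = (int8_t)v->ub[2 * i];
        int8_t  v_odd  = (int8_t)v->ub[2 * i + 1];
        dd[0].h[i] = (int16_t)(u_even * v_even);   /* unsigned-by-signed product */
        dd[1].h[i] = (int16_t)(u_odd  * v_odd);
    }
}

The accumulating forms (the _acc variants) differ only in adding into the Vxx pair with += instead of assigning.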
+ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(16,vmpyih,"Vd32=vmpyih(Vu32,Vv32)","Vd32.h=vmpyi(Vu32.h,Vv32.h)", +"Vector by Vector Halfword Multiply", + VdV.h[i] = fMPY16SS(VuV.h[i], VvV.h[i])) + +ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(16,vmpyih_acc,"Vx32+=vmpyih(Vu32,Vv32)","Vx32.h+=vmpyi(Vu32.h,Vv32.h)", +"Vector by Vector Halfword Multiply", + VxV.h[i] += fMPY16SS(VuV.h[i], VvV.h[i])) + + + +/* 32x32 high half / frac */ + + +ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(32,vmpyewuh,"Vd32=vmpyewuh(Vu32,Vv32)","Vd32.w=vmpye(Vu32.w,Vv32.uh)", +"Vector by Vector Halfword Multiply", +VdV.w[i] = fMPY3216SU(VuV.w[i], fGETUHALF(0, VvV.w[i])) >> 16) + +ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(32,vmpyowh,"Vd32=vmpyowh(Vu32,Vv32):<<1:sat","Vd32.w=vmpyo(Vu32.w,Vv32.h):<<1:sat", +"Vector by Vector Halfword Multiply", +VdV.w[i] = fVSATW((((fMPY3216SS(VuV.w[i], fGETHALF(1, VvV.w[i])) >> 14) + 0) >> 1))) + +ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(32,vmpyowh_rnd,"Vd32=vmpyowh(Vu32,Vv32):<<1:rnd:sat","Vd32.w=vmpyo(Vu32.w,Vv32.h):<<1:rnd:sat", +"Vector by Vector Halfword Multiply", +VdV.w[i] = fVSATW((((fMPY3216SS(VuV.w[i], fGETHALF(1, VvV.w[i])) >> 14) + 1) >> 1))) + +ITERATOR_INSN_MPY_SLOT_DOUBLE_VEC(32,vmpyewuh_64,"Vdd32=vmpye(Vu32.w,Vv32.uh)", +"Word times Halfword Multiply, 64-bit result", + fHIDE(size8s_t prod;) + prod = fMPY32SU(VuV.w[i],fGETUHALF(0,VvV.w[i])); + VddV.v[1].w[i] = prod >> 16; + VddV.v[0].w[i] = prod << 16) + +ITERATOR_INSN_MPY_SLOT_DOUBLE_VEC(32,vmpyowh_64_acc,"Vxx32+=vmpyo(Vu32.w,Vv32.h)", +"Word times Halfword Multiply, 64-bit result", + fHIDE(size8s_t prod;) + prod = fMPY32SS(VuV.w[i],fGETHALF(1,VvV.w[i])) + fSE32_64(VxxV.v[1].w[i]); + VxxV.v[1].w[i] = prod >> 16; + fSETHALF(0, VxxV.v[0].w[i], VxxV.v[0].w[i] >> 16); + fSETHALF(1, VxxV.v[0].w[i], prod & 0x0000ffff)) + +ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(32,vmpyowh_sacc,"Vx32+=vmpyowh(Vu32,Vv32):<<1:sat:shift","Vx32.w+=vmpyo(Vu32.w,Vv32.h):<<1:sat:shift", +"Vector by Vector Halfword Multiply", +IV1DEAD() VxV.w[i] = fVSATW(((((VxV.w[i] + fMPY3216SS(VuV.w[i], fGETHALF(1, VvV.w[i]))) >> 14) + 0) >> 1))) + +ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(32,vmpyowh_rnd_sacc,"Vx32+=vmpyowh(Vu32,Vv32):<<1:rnd:sat:shift","Vx32.w+=vmpyo(Vu32.w,Vv32.h):<<1:rnd:sat:shift", +"Vector by Vector Halfword Multiply", +IV1DEAD() VxV.w[i] = fVSATW(((((VxV.w[i] + fMPY3216SS(VuV.w[i], fGETHALF(1, VvV.w[i]))) >> 14) + 1) >> 1))) + +/* For 32x32 integer / low half */ + +ITERATOR_INSN_MPY_SLOT(32,vmpyieoh,"Vd32.w=vmpyieo(Vu32.h,Vv32.h)","Odd/Even multiply for 32x32 low half", + VdV.w[i] = (fGETHALF(0,VuV.w[i])*fGETHALF(1,VvV.w[i])) << 16) + +ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(32,vmpyiewuh,"Vd32=vmpyiewuh(Vu32,Vv32)","Vd32.w=vmpyie(Vu32.w,Vv32.uh)", +"Vector by Vector Word by Halfword Multiply", +IV1DEAD() VdV.w[i] = fMPY3216SU(VuV.w[i], fGETUHALF(0, VvV.w[i])) ) + +ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(32,vmpyiowh,"Vd32=vmpyiowh(Vu32,Vv32)","Vd32.w=vmpyio(Vu32.w,Vv32.h)", +"Vector by Vector Word by Halfword Multiply", +IV1DEAD() VdV.w[i] = fMPY3216SS(VuV.w[i], fGETHALF(1, VvV.w[i])) ) + +/* Add back these... 
*/ + +ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(32,vmpyiewh_acc,"Vx32+=vmpyiewh(Vu32,Vv32)","Vx32.w+=vmpyie(Vu32.w,Vv32.h)", +"Vector by Vector Word by Halfword Multiply", +VxV.w[i] = VxV.w[i] + fMPY3216SS(VuV.w[i], fGETHALF(0, VvV.w[i])) ) + +ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(32,vmpyiewuh_acc,"Vx32+=vmpyiewuh(Vu32,Vv32)","Vx32.w+=vmpyie(Vu32.w,Vv32.uh)", +"Vector by Vector Word by Halfword Multiply", +VxV.w[i] = VxV.w[i] + fMPY3216SU(VuV.w[i], fGETUHALF(0, VvV.w[i])) ) + + + + + + + +/* Vector by Scalar */ +ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(16,vmpyub,"Vdd32=vmpyub(Vu32,Rt32)","Vdd32.uh=vmpy(Vu32.ub,Rt32.ub)", +"Vector absolute value of words", + VddV.v[0].uh[i] = fMPY8UU(fGETUBYTE(0, VuV.uh[i]), fGETUBYTE((2*i+0)%4, RtV)); + VddV.v[1].uh[i] = fMPY8UU(fGETUBYTE(1, VuV.uh[i]), fGETUBYTE((2*i+1)%4, RtV))) + +ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(16,vmpyub_acc,"Vxx32+=vmpyub(Vu32,Rt32)","Vxx32.uh+=vmpy(Vu32.ub,Rt32.ub)", +"Vector absolute value of words", + VxxV.v[0].uh[i] += fMPY8UU(fGETUBYTE(0, VuV.uh[i]), fGETUBYTE((2*i+0)%4, RtV)); + VxxV.v[1].uh[i] += fMPY8UU(fGETUBYTE(1, VuV.uh[i]), fGETUBYTE((2*i+1)%4, RtV))) + + +ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(16,vmpybus,"Vdd32=vmpybus(Vu32,Rt32)","Vdd32.h=vmpy(Vu32.ub,Rt32.b)", +"Vector absolute value of words", + VddV.v[0].h[i] = fMPY8US(fGETUBYTE(0, VuV.uh[i]), fGETBYTE((2*i+0)%4, RtV)); + VddV.v[1].h[i] = fMPY8US(fGETUBYTE(1, VuV.uh[i]), fGETBYTE((2*i+1)%4, RtV))) + +ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(16,vmpybus_acc,"Vxx32+=vmpybus(Vu32,Rt32)","Vxx32.h+=vmpy(Vu32.ub,Rt32.b)", +"Vector absolute value of words", + VxxV.v[0].h[i] += fMPY8US(fGETUBYTE(0, VuV.uh[i]), fGETBYTE((2*i+0)%4, RtV)); + VxxV.v[1].h[i] += fMPY8US(fGETUBYTE(1, VuV.uh[i]), fGETBYTE((2*i+1)%4, RtV))) + + +ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(16,vmpabus,"Vdd32=vmpabus(Vuu32,Rt32)","Vdd32.h=vmpa(Vuu32.ub,Rt32.b)", +"Vertical Byte Multiply", + VddV.v[0].h[i] = fMPY8US(fGETUBYTE(0, VuuV.v[0].uh[i]), fGETBYTE(0, RtV)) + fMPY16SS(fGETUBYTE(0, VuuV.v[1].uh[i]), fGETBYTE(1, RtV)); + VddV.v[1].h[i] = fMPY8US(fGETUBYTE(1, VuuV.v[0].uh[i]), fGETBYTE(2, RtV)) + fMPY16SS(fGETUBYTE(1, VuuV.v[1].uh[i]), fGETBYTE(3, RtV))) + +ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(16,vmpabus_acc,"Vxx32+=vmpabus(Vuu32,Rt32)","Vxx32.h+=vmpa(Vuu32.ub,Rt32.b)", +"Vertical Byte Multiply", + VxxV.v[0].h[i] += fMPY8US(fGETUBYTE(0, VuuV.v[0].uh[i]), fGETBYTE(0, RtV)) + fMPY16SS(fGETUBYTE(0, VuuV.v[1].uh[i]), fGETBYTE(1, RtV)); + VxxV.v[1].h[i] += fMPY8US(fGETUBYTE(1, VuuV.v[0].uh[i]), fGETBYTE(2, RtV)) + fMPY16SS(fGETUBYTE(1, VuuV.v[1].uh[i]), fGETBYTE(3, RtV))) + +// V65 + +ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC_NOV1(16,vmpabuu,"Vdd32=vmpabuu(Vuu32,Rt32)","Vdd32.h=vmpa(Vuu32.ub,Rt32.ub)", +"Vertical Byte Multiply", + VddV.v[0].uh[i] = fMPY8UU(fGETUBYTE(0, VuuV.v[0].uh[i]), fGETUBYTE(0, RtV)) + fMPY8UU(fGETUBYTE(0, VuuV.v[1].uh[i]), fGETUBYTE(1, RtV)); + VddV.v[1].uh[i] = fMPY8UU(fGETUBYTE(1, VuuV.v[0].uh[i]), fGETUBYTE(2, RtV)) + fMPY8UU(fGETUBYTE(1, VuuV.v[1].uh[i]), fGETUBYTE(3, RtV))) + +ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC_NOV1(16,vmpabuu_acc,"Vxx32+=vmpabuu(Vuu32,Rt32)","Vxx32.h+=vmpa(Vuu32.ub,Rt32.ub)", +"Vertical Byte Multiply", + VxxV.v[0].uh[i] += fMPY8UU(fGETUBYTE(0, VuuV.v[0].uh[i]), fGETUBYTE(0, RtV)) + fMPY8UU(fGETUBYTE(0, VuuV.v[1].uh[i]), fGETUBYTE(1, RtV)); + VxxV.v[1].uh[i] += fMPY8UU(fGETUBYTE(1, VuuV.v[0].uh[i]), fGETUBYTE(2, RtV)) + fMPY8UU(fGETUBYTE(1, VuuV.v[1].uh[i]), fGETUBYTE(3, RtV))) + + + + +/* Half by Byte */ 
+ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(32,vmpahb,"Vdd32=vmpahb(Vuu32,Rt32)","Vdd32.w=vmpa(Vuu32.h,Rt32.b)", +"Vertical Byte Multiply", + VddV.v[0].w[i] = fMPY16SS(fGETHALF(0, VuuV.v[0].w[i]), fSE8_16(fGETBYTE(0, RtV))) + fMPY16SS(fGETHALF(0, VuuV.v[1].w[i]), fSE8_16(fGETBYTE(1, RtV))); + VddV.v[1].w[i] = fMPY16SS(fGETHALF(1, VuuV.v[0].w[i]), fSE8_16(fGETBYTE(2, RtV))) + fMPY16SS(fGETHALF(1, VuuV.v[1].w[i]), fSE8_16(fGETBYTE(3, RtV)))) + +ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(32,vmpahb_acc,"Vxx32+=vmpahb(Vuu32,Rt32)","Vxx32.w+=vmpa(Vuu32.h,Rt32.b)", +"Vertical Byte Multiply", + VxxV.v[0].w[i] += fMPY16SS(fGETHALF(0, VuuV.v[0].w[i]), fSE8_16(fGETBYTE(0, RtV))) + fMPY16SS(fGETHALF(0, VuuV.v[1].w[i]), fSE8_16(fGETBYTE(1, RtV))); + VxxV.v[1].w[i] += fMPY16SS(fGETHALF(1, VuuV.v[0].w[i]), fSE8_16(fGETBYTE(2, RtV))) + fMPY16SS(fGETHALF(1, VuuV.v[1].w[i]), fSE8_16(fGETBYTE(3, RtV)))) + +/* Half by Byte */ +ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(32,vmpauhb,"Vdd32=vmpauhb(Vuu32,Rt32)","Vdd32.w=vmpa(Vuu32.uh,Rt32.b)", +"Vertical Byte Multiply", + VddV.v[0].w[i] = fMPY16US(fGETUHALF(0, VuuV.v[0].w[i]), fSE8_16(fGETBYTE(0, RtV))) + fMPY16US(fGETUHALF(0, VuuV.v[1].w[i]), fSE8_16(fGETBYTE(1, RtV))); + VddV.v[1].w[i] = fMPY16US(fGETUHALF(1, VuuV.v[0].w[i]), fSE8_16(fGETBYTE(2, RtV))) + fMPY16US(fGETUHALF(1, VuuV.v[1].w[i]), fSE8_16(fGETBYTE(3, RtV)))) + +ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(32,vmpauhb_acc,"Vxx32+=vmpauhb(Vuu32,Rt32)","Vxx32.w+=vmpa(Vuu32.uh,Rt32.b)", +"Vertical Byte Multiply", + VxxV.v[0].w[i] += fMPY16US(fGETUHALF(0, VuuV.v[0].w[i]), fSE8_16(fGETBYTE(0, RtV))) + fMPY16US(fGETUHALF(0, VuuV.v[1].w[i]), fSE8_16(fGETBYTE(1, RtV))); + VxxV.v[1].w[i] += fMPY16US(fGETUHALF(1, VuuV.v[0].w[i]), fSE8_16(fGETBYTE(2, RtV))) + fMPY16US(fGETUHALF(1, VuuV.v[1].w[i]), fSE8_16(fGETBYTE(3, RtV)))) + + + + + + + +/* Half by Half */ +ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(32,vmpyh,"Vdd32=vmpyh(Vu32,Rt32)","Vdd32.w=vmpy(Vu32.h,Rt32.h)", +"Vector absolute value of words", + VddV.v[0].w[i] = fMPY16SS(fGETHALF(0, VuV.w[i]), fGETHALF(0, RtV)); + VddV.v[1].w[i] = fMPY16SS(fGETHALF(1, VuV.w[i]), fGETHALF(1, RtV))) + +ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC_NOV1(32,vmpyh_acc,"Vxx32+=vmpyh(Vu32,Rt32)","Vxx32.w+=vmpy(Vu32.h,Rt32.h)", +"Vector even halfwords with scalar lower halfword multiply with shift and sat32", + VxxV.v[0].w[i] = fCAST8s(VxxV.v[0].w[i]) + fMPY16SS(fGETHALF(0, VuV.w[i]), fGETHALF(0, RtV)); + VxxV.v[1].w[i] = fCAST8s(VxxV.v[1].w[i]) + fMPY16SS(fGETHALF(1, VuV.w[i]), fGETHALF(1, RtV))) + + +ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(32,vmpyhsat_acc,"Vxx32+=vmpyh(Vu32,Rt32):sat","Vxx32.w+=vmpy(Vu32.h,Rt32.h):sat", +"Vector even halfwords with scalar lower halfword multiply with shift and sat32", + VxxV.v[0].w[i] = fVSATW(fCAST8s(VxxV.v[0].w[i]) + fMPY16SS(fGETHALF(0, VuV.w[i]), fGETHALF(0, RtV))); + VxxV.v[1].w[i] = fVSATW(fCAST8s(VxxV.v[1].w[i]) + fMPY16SS(fGETHALF(1, VuV.w[i]), fGETHALF(1, RtV)))) + + + +ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(32,vmpyhss,"Vd32=vmpyh(Vu32,Rt32):<<1:sat","Vd32.h=vmpy(Vu32.h,Rt32.h):<<1:sat", +"Vector halfword by halfword multiply, shift by 1, and take upper 16 msb", + fSETHALF(0,VdV.w[i],fVSATH(fGETHALF(1,fVSAT((fMPY16SS(fGETHALF(0,VuV.w[i]),fGETHALF(0,RtV))<<1))))); + fSETHALF(1,VdV.w[i],fVSATH(fGETHALF(1,fVSAT((fMPY16SS(fGETHALF(1,VuV.w[i]),fGETHALF(1,RtV))<<1))))); +) + +ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(32,vmpyhsrs,"Vd32=vmpyh(Vu32,Rt32):<<1:rnd:sat","Vd32.h=vmpy(Vu32.h,Rt32.h):<<1:rnd:sat", +"Vector halfword with scalar halfword multiply with round, shift, and sat16", + 
fSETHALF(0,VdV.w[i],fVSATH(fGETHALF(1,fVSAT(fROUND((fMPY16SS(fGETHALF(0,VuV.w[i]),fGETHALF(0,RtV))<<1)))))); + fSETHALF(1,VdV.w[i],fVSATH(fGETHALF(1,fVSAT(fROUND((fMPY16SS(fGETHALF(1,VuV.w[i]),fGETHALF(1,RtV))<<1)))))); +) + + +ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(32,vmpyuh,"Vdd32=vmpyuh(Vu32,Rt32)","Vdd32.uw=vmpy(Vu32.uh,Rt32.uh)", +"Vector even halfword unsigned multiply by scalar", + VddV.v[0].uw[i] = fMPY16UU(fGETUHALF(0, VuV.uw[i]),fGETUHALF(0,RtV)); + VddV.v[1].uw[i] = fMPY16UU(fGETUHALF(1, VuV.uw[i]),fGETUHALF(1,RtV))) + + +ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(32,vmpyuh_acc,"Vxx32+=vmpyuh(Vu32,Rt32)","Vxx32.uw+=vmpy(Vu32.uh,Rt32.uh)", +"Vector even halfword unsigned multiply by scalar", + VxxV.v[0].uw[i] += fMPY16UU(fGETUHALF(0, VuV.uw[i]),fGETUHALF(0,RtV)); + VxxV.v[1].uw[i] += fMPY16UU(fGETUHALF(1, VuV.uw[i]),fGETUHALF(1,RtV))) + + + + +/******************************************** +* HALF BY BYTE +********************************************/ +ITERATOR_INSN2_MPY_SLOT(16,vmpyihb,"Vd32=vmpyihb(Vu32,Rt32)","Vd32.h=vmpyi(Vu32.h,Rt32.b)", +"Vector word by byte multiply, keep lower result", +VdV.h[i] = fMPY16SS(VuV.h[i], fGETBYTE(i % 4, RtV) )) + +ITERATOR_INSN2_MPY_SLOT(16,vmpyihb_acc,"Vx32+=vmpyihb(Vu32,Rt32)","Vx32.h+=vmpyi(Vu32.h,Rt32.b)", +"Vector word by byte multiply, keep lower result", +VxV.h[i] += fMPY16SS(VuV.h[i], fGETBYTE(i % 4, RtV) )) + + +/******************************************** +* WORD BY BYTE +********************************************/ +ITERATOR_INSN2_MPY_SLOT(32,vmpyiwb,"Vd32=vmpyiwb(Vu32,Rt32)","Vd32.w=vmpyi(Vu32.w,Rt32.b)", +"Vector word by byte multiply, keep lower result", +VdV.w[i] = fMPY32SS(VuV.w[i], fGETBYTE(i % 4, RtV) )) + +ITERATOR_INSN2_MPY_SLOT(32,vmpyiwb_acc,"Vx32+=vmpyiwb(Vu32,Rt32)","Vx32.w+=vmpyi(Vu32.w,Rt32.b)", +"Vector word by byte multiply, keep lower result", +VxV.w[i] += fMPY32SS(VuV.w[i], fGETBYTE(i % 4, RtV) )) + +ITERATOR_INSN2_MPY_SLOT(32,vmpyiwub,"Vd32=vmpyiwub(Vu32,Rt32)","Vd32.w=vmpyi(Vu32.w,Rt32.ub)", +"Vector word by byte multiply, keep lower result", +VdV.w[i] = fMPY32SS(VuV.w[i], fGETUBYTE(i % 4, RtV) )) + +ITERATOR_INSN2_MPY_SLOT(32,vmpyiwub_acc,"Vx32+=vmpyiwub(Vu32,Rt32)","Vx32.w+=vmpyi(Vu32.w,Rt32.ub)", +"Vector word by byte multiply, keep lower result", +VxV.w[i] += fMPY32SS(VuV.w[i], fGETUBYTE(i % 4, RtV) )) + + +/******************************************** +* WORD BY HALF +********************************************/ +ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(32,vmpyiwh,"Vd32=vmpyiwh(Vu32,Rt32)","Vd32.w=vmpyi(Vu32.w,Rt32.h)", +"Vector word by byte multiply, keep lower result", +VdV.w[i] = fMPY32SS(VuV.w[i], fGETHALF(i % 2, RtV))) + +ITERATOR_INSN2_MPY_SLOT_DOUBLE_VEC(32,vmpyiwh_acc,"Vx32+=vmpyiwh(Vu32,Rt32)","Vx32.w+=vmpyi(Vu32.w,Rt32.h)", +"Vector word by byte multiply, keep lower result", +VxV.w[i] += fMPY32SS(VuV.w[i], fGETHALF(i % 2, RtV))) + + + + + + + + + + + + + + + + + + + +/************************************************************************** + * MMVECTOR LOGICAL OPERATIONS + * ************************************************************************/ +ITERATOR_INSN_ANY_SLOT(16,vand,"Vd32=vand(Vu32,Vv32)", "Vector Logical And", VdV.uh[i] = VuV.uh[i] & VvV.h[i]) +ITERATOR_INSN_ANY_SLOT(16,vor, "Vd32=vor(Vu32,Vv32)", "Vector Logical Or", VdV.uh[i] = VuV.uh[i] | VvV.h[i]) +ITERATOR_INSN_ANY_SLOT(16,vxor,"Vd32=vxor(Vu32,Vv32)", "Vector Logical XOR", VdV.uh[i] = VuV.uh[i] ^ VvV.h[i]) +ITERATOR_INSN_ANY_SLOT(16,vnot,"Vd32=vnot(Vu32)", "Vector Logical NOT", VdV.uh[i] = ~VuV.uh[i]) + + + + + 
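Note (editorial, not part of the patch): the vand forms that follow move data between the Q predicate registers, which hold one bit per byte lane of a vector, and ordinary vector registers. vand(Qu,Rt) expands the bit mask into a byte vector by replicating the four bytes of the scalar Rt across the vector and zeroing lanes whose predicate bit is clear, exactly as the fGETQBIT/fGETUBYTE expression below states. A scalar sketch with an invented 16-byte toy vector length:

#include <stdint.h>
#include <stdio.h>

#define VB 16                          /* toy vector length in bytes */

static void toy_vandqrt(uint8_t vd[VB], const uint8_t qbits[VB], uint32_t rt)
{
    for (int i = 0; i < VB; i++) {
        uint8_t rt_byte = (rt >> (8 * (i % 4))) & 0xff;  /* Rt byte i%4 */
        vd[i] = qbits[i] ? rt_byte : 0;                  /* mask by Q bit */
    }
}

int main(void)
{
    uint8_t q[VB], vd[VB];
    for (int i = 0; i < VB; i++) {
        q[i] = (uint8_t)(i & 1);       /* predicate true on odd lanes only */
    }
    toy_vandqrt(vd, q, 0xddccbbaa);
    for (int i = 0; i < VB; i++) {
        printf("%02x ", vd[i]);
    }
    printf("\n");
    return 0;
}

The vandvrt forms go the other way, collapsing a byte vector back into predicate bits by testing each byte against the replicated Rt byte.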
+ITERATOR_INSN2_MPY_SLOT_LATE(8, vandqrt, +"Vd32.ub=vand(Qu4.ub,Rt32.ub)", "Vd32=vand(Qu4,Rt32)", "Insert Predicate into Vector", + VdV.ub[i] = fGETQBIT(QuV,i) ? fGETUBYTE(i % 4, RtV) : 0) + +ITERATOR_INSN2_MPY_SLOT_LATE(8, vandqrt_acc, +"Vx32.ub|=vand(Qu4.ub,Rt32.ub)", "Vx32|=vand(Qu4,Rt32)", "Insert Predicate into Vector", + VxV.ub[i] |= (fGETQBIT(QuV,i)) ? fGETUBYTE(i % 4, RtV) : 0) + +ITERATOR_INSN2_MPY_SLOT_LATE(8, vandnqrt, +"Vd32.ub=vand(!Qu4.ub,Rt32.ub)", "Vd32=vand(!Qu4,Rt32)", "Insert Predicate into Vector", + VdV.ub[i] = !fGETQBIT(QuV,i) ? fGETUBYTE(i % 4, RtV) : 0) + +ITERATOR_INSN2_MPY_SLOT_LATE(8, vandnqrt_acc, +"Vx32.ub|=vand(!Qu4.ub,Rt32.ub)", "Vx32|=vand(!Qu4,Rt32)", "Insert Predicate into Vector", + VxV.ub[i] |= !(fGETQBIT(QuV,i)) ? fGETUBYTE(i % 4, RtV) : 0) + + +ITERATOR_INSN2_MPY_SLOT_LATE(8, vandvrt, +"Qd4.ub=vand(Vu32.ub,Rt32.ub)", "Qd4=vand(Vu32,Rt32)", "Insert into Predicate", + fSETQBIT(QdV,i,((VuV.ub[i] & fGETUBYTE(i % 4, RtV)) != 0) ? 1 : 0)) + +ITERATOR_INSN2_MPY_SLOT_LATE(8, vandvrt_acc, +"Qx4.ub|=vand(Vu32.ub,Rt32.ub)", "Qx4|=vand(Vu32,Rt32)", "Insert into Predicate ", + fSETQBIT(QxV,i,fGETQBIT(QxV,i)|(((VuV.ub[i] & fGETUBYTE(i % 4, RtV)) != 0) ? 1 : 0))) + +ITERATOR_INSN_ANY_SLOT(8,vandvqv,"Vd32=vand(Qv4,Vu32)","Mask off bytes", +VdV.b[i] = fGETQBIT(QvV,i) ? VuV.b[i] : 0) +ITERATOR_INSN_ANY_SLOT(8,vandvnqv,"Vd32=vand(!Qv4,Vu32)","Mask off bytes", +VdV.b[i] = !fGETQBIT(QvV,i) ? VuV.b[i] : 0) + + + /*************************************************** + * Compare Vector with Vector + ***************************************************/ +#define VCMP(DEST, ASRC, ASRCOP, CMP, N, SRC, MASK, WIDTH) \ +{ \ + for(fHIDE(int) i = 0; i < fVBYTES(); i += WIDTH) { \ + fSETQBITS(DEST,WIDTH,MASK,i,ASRC ASRCOP ((VuV.SRC[i/WIDTH] CMP VvV.SRC[i/WIDTH]) ? MASK : 0)); \ + } \ + } + + +#define MMVEC_CMPGT(TYPE,TYPE2,TYPE3,DESCR,N,MASK,WIDTH,SRC) \ +EXTINSN(V6_vgt##TYPE, "Qd4=vcmp.gt(Vu32." TYPE2 ",Vv32." TYPE2 ")", ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VA), DESCR" greater than", \ + VCMP(QdV, , , >, N, SRC, MASK, WIDTH)) \ +EXTINSN(V6_vgt##TYPE##_and, "Qx4&=vcmp.gt(Vu32." TYPE2 ",Vv32." TYPE2 ")", ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VA), DESCR" greater than with predicate-and", \ + VCMP(QxV, fGETQBITS(QxV,WIDTH,MASK,i), &, >, N, SRC, MASK, WIDTH)) \ +EXTINSN(V6_vgt##TYPE##_or, "Qx4|=vcmp.gt(Vu32." TYPE2 ",Vv32." TYPE2 ")", ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VA), DESCR" greater than with predicate-or", \ + VCMP(QxV, fGETQBITS(QxV,WIDTH,MASK,i), |, >, N, SRC, MASK, WIDTH)) \ +EXTINSN(V6_vgt##TYPE##_xor, "Qx4^=vcmp.gt(Vu32." TYPE2 ",Vv32." TYPE2 ")", ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VA), DESCR" greater than with predicate-xor", \ + VCMP(QxV, fGETQBITS(QxV,WIDTH,MASK,i), ^, >, N, SRC, MASK, WIDTH)) + +#define MMVEC_CMP(TYPE,TYPE2,TYPE3,DESCR,N,MASK, WIDTH, SRC)\ +MMVEC_CMPGT(TYPE,TYPE2,TYPE3,DESCR,N,MASK,WIDTH,SRC) \ +EXTINSN(V6_veq##TYPE, "Qd4=vcmp.eq(Vu32." TYPE2 ",Vv32." TYPE2 ")", ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VA), DESCR" equal to", \ + VCMP(QdV, , , ==, N, SRC, MASK, WIDTH)) \ +EXTINSN(V6_veq##TYPE##_and, "Qx4&=vcmp.eq(Vu32." TYPE2 ",Vv32." TYPE2 ")", ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VA), DESCR" equalto with predicate-and", \ + VCMP(QxV, fGETQBITS(QxV,WIDTH,MASK,i), &, ==, N, SRC, MASK, WIDTH)) \ +EXTINSN(V6_veq##TYPE##_or, "Qx4|=vcmp.eq(Vu32." TYPE2 ",Vv32." TYPE2 ")", ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VA), DESCR" equalto with predicate-or", \ + VCMP(QxV, fGETQBITS(QxV,WIDTH,MASK,i), |, ==, N, SRC, MASK, WIDTH)) \ +EXTINSN(V6_veq##TYPE##_xor, "Qx4^=vcmp.eq(Vu32." TYPE2 ",Vv32." 
TYPE2 ")", ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VA), DESCR" equalto with predicate-xor", \ + VCMP(QxV, fGETQBITS(QxV,WIDTH,MASK,i), ^, ==, N, SRC, MASK, WIDTH)) + + +MMVEC_CMP(w,"w","","Vector Word Compare ", fVELEM(32), 0xF, 4, w) +MMVEC_CMP(h,"h","","Vector Half Compare ", fVELEM(16), 0x3, 2, h) +MMVEC_CMP(b,"b","","Vector Half Compare ", fVELEM(8), 0x1, 1, b) +MMVEC_CMPGT(uw,"uw","","Vector Unsigned Half Compare ", fVELEM(32), 0xF, 4,uw) +MMVEC_CMPGT(uh,"uh","","Vector Unsigned Half Compare ", fVELEM(16), 0x3, 2,uh) +MMVEC_CMPGT(ub,"ub","","Vector Unsigned Byte Compare ", fVELEM(8), 0x1, 1,ub) + +/*************************************************** +* Predicate Operations +***************************************************/ + +EXTINSN(V6_pred_scalar2, "Qd4=vsetq(Rt32)", ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VP), "Set Vector Predicate ", +{ + fHIDE(int i;) + for(i = 0; i < fVBYTES(); i++) fSETQBIT(QdV,i,(i < (RtV & (fVBYTES()-1))) ? 1 : 0); +}) + +EXTINSN(V6_pred_scalar2v2, "Qd4=vsetq2(Rt32)", ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VP), "Set Vector Predicate ", +{ + fHIDE(int i;) + for(i = 0; i < fVBYTES(); i++) fSETQBIT(QdV,i,(i <= ((RtV-1) & (fVBYTES()-1))) ? 1 : 0); +}) + + +ITERATOR_INSN_ANY_SLOT_DOUBLE_VEC(8, shuffeqw, "Qd4.h=vshuffe(Qs4.w,Qt4.w)","Shrink Predicate", fSETQBIT(QdV,i, (i & 2) ? fGETQBIT(QsV,i-2) : fGETQBIT(QtV,i) ) ) +ITERATOR_INSN_ANY_SLOT_DOUBLE_VEC(8, shuffeqh, "Qd4.b=vshuffe(Qs4.h,Qt4.h)","Shrink Predicate", fSETQBIT(QdV,i, (i & 1) ? fGETQBIT(QsV,i-1) : fGETQBIT(QtV,i) ) ) +ITERATOR_INSN_ANY_SLOT_DOUBLE_VEC(8, pred_or, "Qd4=or(Qs4,Qt4)","Vector Predicate Or", fSETQBIT(QdV,i,fGETQBIT(QsV,i) || fGETQBIT(QtV,i) ) ) +ITERATOR_INSN_ANY_SLOT_DOUBLE_VEC(8, pred_and, "Qd4=and(Qs4,Qt4)","Vector Predicate And", fSETQBIT(QdV,i,fGETQBIT(QsV,i) && fGETQBIT(QtV,i) ) ) +ITERATOR_INSN_ANY_SLOT_DOUBLE_VEC(8, pred_xor, "Qd4=xor(Qs4,Qt4)","Vector Predicate Xor", fSETQBIT(QdV,i,fGETQBIT(QsV,i) ^ fGETQBIT(QtV,i) ) ) +ITERATOR_INSN_ANY_SLOT_DOUBLE_VEC(8, pred_or_n, "Qd4=or(Qs4,!Qt4)","Vector Predicate Or with not", fSETQBIT(QdV,i,fGETQBIT(QsV,i) || !fGETQBIT(QtV,i) ) ) +ITERATOR_INSN_ANY_SLOT_DOUBLE_VEC(8, pred_and_n, "Qd4=and(Qs4,!Qt4)","Vector Predicate And with not", fSETQBIT(QdV,i,fGETQBIT(QsV,i) && !fGETQBIT(QtV,i) ) ) +ITERATOR_INSN_ANY_SLOT(8, pred_not, "Qd4=not(Qs4)","Vector Predicate Not", fSETQBIT(QdV,i,!fGETQBIT(QsV,i) ) ) + + + +EXTINSN(V6_vcmov, "if (Ps4) Vd32=Vu32", ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VA), "Conditional Mov", +{ +if (fLSBOLD(PsV)) { + fHIDE(int i;) + fVFOREACH(8, i) { + VdV.ub[i] = VuV.ub[i]; + } + } else {CANCEL;} +}) + +EXTINSN(V6_vncmov, "if (!Ps4) Vd32=Vu32", ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VA), "Conditional Mov", +{ +if (fLSBOLDNOT(PsV)) { + fHIDE(int i;) + fVFOREACH(8, i) { + VdV.ub[i] = VuV.ub[i]; + } + } else {CANCEL;} +}) + +EXTINSN(V6_vccombine, "if (Ps4) Vdd32=vcombine(Vu32,Vv32)", ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VA_DV), "Conditional Combine", +{ +if (fLSBOLD(PsV)) { + fHIDE(int i;) + fVFOREACH(8, i) { + VddV.v[0].ub[i] = VvV.ub[i]; + VddV.v[1].ub[i] = VuV.ub[i]; + } + } else {CANCEL;} +}) + +EXTINSN(V6_vnccombine, "if (!Ps4) Vdd32=vcombine(Vu32,Vv32)", ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VA_DV), "Conditional Combine", +{ +if (fLSBOLDNOT(PsV)) { + fHIDE(int i;) + fVFOREACH(8, i) { + VddV.v[0].ub[i] = VvV.ub[i]; + VddV.v[1].ub[i] = VuV.ub[i]; + } + } else {CANCEL;} +}) + + + +ITERATOR_INSN_ANY_SLOT(8,vmux,"Vd32=vmux(Qt4,Vu32,Vv32)", +"Vector Select Element 8-bit", + VdV.ub[i] = fGETQBIT(QtV,i) ? 
VuV.ub[i] : VvV.ub[i]) + +ITERATOR_INSN_ANY_SLOT_DOUBLE_VEC(8,vswap,"Vdd32=vswap(Qt4,Vu32,Vv32)", +"Vector Swap Element 8-bit", + VddV.v[0].ub[i] = fGETQBIT(QtV,i) ? VuV.ub[i] : VvV.ub[i]; + VddV.v[1].ub[i] = !fGETQBIT(QtV,i) ? VuV.ub[i] : VvV.ub[i]) + + +/*************************************************************************** +* +* MMVECTOR SORTING +* +****************************************************************************/ + +#define MMVEC_SORT(TYPE,TYPE2,DESCR,ELEMENTSIZE,SRC)\ +ITERATOR_INSN2_ANY_SLOT(ELEMENTSIZE,vmax##TYPE, "Vd32=vmax" TYPE2 "(Vu32,Vv32)", "Vd32."#SRC"=vmax(Vu32."#SRC",Vv32."#SRC")", "Vector " DESCR " max", VdV.SRC[i] = (VuV.SRC[i] > VvV.SRC[i]) ? VuV.SRC[i] : VvV.SRC[i]) \ +ITERATOR_INSN2_ANY_SLOT(ELEMENTSIZE,vmin##TYPE, "Vd32=vmin" TYPE2 "(Vu32,Vv32)", "Vd32."#SRC"=vmin(Vu32."#SRC",Vv32."#SRC")", "Vector " DESCR " min", VdV.SRC[i] = (VuV.SRC[i] < VvV.SRC[i]) ? VuV.SRC[i] : VvV.SRC[i]) + +MMVEC_SORT(b,"b", "signed byte", 8, b) +MMVEC_SORT(ub,"ub", "unsigned byte", 8, ub) +MMVEC_SORT(uh,"uh", "unsigned halfword",16, uh) +MMVEC_SORT(h, "h", "halfword", 16, h) +MMVEC_SORT(w, "w", "word", 32, w) + + + + + + + + + +/************************************************************* +* SHUFFLES +****************************************************************/ + +ITERATOR_INSN2_ANY_SLOT(16,vsathub,"Vd32=vsathub(Vu32,Vv32)","Vd32.ub=vsat(Vu32.h,Vv32.h)", +"Saturate and pack 32 halfwords to 32 unsigned bytes, and interleave them", + fSETBYTE(0, VdV.uh[i], fVSATUB(VvV.h[i])); + fSETBYTE(1, VdV.uh[i], fVSATUB(VuV.h[i]))) + +ITERATOR_INSN2_ANY_SLOT(32,vsatwh,"Vd32=vsatwh(Vu32,Vv32)","Vd32.h=vsat(Vu32.w,Vv32.w)", +"Saturate and pack 16 words to 16 halfwords, and interleave them", + fSETHALF(0, VdV.w[i], fVSATH(VvV.w[i])); + fSETHALF(1, VdV.w[i], fVSATH(VuV.w[i]))) + +ITERATOR_INSN2_ANY_SLOT(32,vsatuwuh,"Vd32=vsatuwuh(Vu32,Vv32)","Vd32.uh=vsat(Vu32.uw,Vv32.uw)", +"Saturate and pack 16 words to 16 halfwords, and interleave them", + fSETHALF(0, VdV.w[i], fVSATUH(VvV.uw[i])); + fSETHALF(1, VdV.w[i], fVSATUH(VuV.uw[i]))) + +ITERATOR_INSN2_ANY_SLOT(16,vshuffeb,"Vd32=vshuffeb(Vu32,Vv32)","Vd32.b=vshuffe(Vu32.b,Vv32.b)", +"Shuffle half words with in a lane", + fSETBYTE(0, VdV.uh[i], fGETUBYTE(0, VvV.uh[i])); + fSETBYTE(1, VdV.uh[i], fGETUBYTE(0, VuV.uh[i]))) + +ITERATOR_INSN2_ANY_SLOT(16,vshuffob,"Vd32=vshuffob(Vu32,Vv32)","Vd32.b=vshuffo(Vu32.b,Vv32.b)", +"Shuffle half words with in a lane", + fSETBYTE(0, VdV.uh[i], fGETUBYTE(1, VvV.uh[i])); + fSETBYTE(1, VdV.uh[i], fGETUBYTE(1, VuV.uh[i]))) + +ITERATOR_INSN2_ANY_SLOT(32,vshufeh,"Vd32=vshuffeh(Vu32,Vv32)","Vd32.h=vshuffe(Vu32.h,Vv32.h)", +"Shuffle half words with in a lane", + fSETHALF(0, VdV.uw[i], fGETUHALF(0, VvV.uw[i])); + fSETHALF(1, VdV.uw[i], fGETUHALF(0, VuV.uw[i]))) + +ITERATOR_INSN2_ANY_SLOT(32,vshufoh,"Vd32=vshuffoh(Vu32,Vv32)","Vd32.h=vshuffo(Vu32.h,Vv32.h)", +"Shuffle half words with in a lane", + fSETHALF(0, VdV.uw[i], fGETUHALF(1, VvV.uw[i])); + fSETHALF(1, VdV.uw[i], fGETUHALF(1, VuV.uw[i]))) + + + + +/************************************************************************** +* Double Vector Shuffles +**************************************************************************/ + +EXTINSN(V6_vshuff, "vshuff(Vy32,Vx32,Rt32)", +ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VP_VS), +"2x2->2x2 transpose, for multiple data sizes, inplace", +{ + fHIDE(int offset;) + for (offset=1; offset2x2 transpose for multiple data sizes", +{ + fHIDE(int offset;) + VddV.v[0] = VvV; + VddV.v[1] = VuV; + for (offset=1; offset>1; offset>0; offset>>=1) { 
+ if ( RtV & offset) { + fHIDE(int k;) \ + fVFOREACH(8, k) {\ + if (!( k & offset)) { + fSWAPB(VyV.ub[k], VxV.ub[k+offset]); + } + } + } + } + }) + +EXTINSN(V6_vdealvdd, "Vdd32=vdeal(Vu32,Vv32,Rt8)", +ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VP_VS), +" vector - vector deal - or deinterleave, for multiple data sizes", +{ + fHIDE(int offset;) + VddV.v[0] = VvV; + VddV.v[1] = VuV; + for (offset=fVBYTES()>>1; offset>0; offset>>=1) { + if ( RtV & offset) { + fHIDE(int k;) \ + fVFOREACH(8, k) {\ + if (!( k & offset)) { + fSWAPB(VddV.v[1].ub[k], VddV.v[0].ub[k+offset]); + } + } + } + } + }) + +/**************************************************************************/ + + + +ITERATOR_INSN2_ANY_SLOT_DOUBLE_VEC(32,vshufoeh,"Vdd32=vshuffoeh(Vu32,Vv32)","Vdd32.h=vshuffoe(Vu32.h,Vv32.h)", +"Vector Shuffle half words", + fSETHALF(0, VddV.v[0].uw[i], fGETUHALF(0, VvV.uw[i])); + fSETHALF(1, VddV.v[0].uw[i], fGETUHALF(0, VuV.uw[i])); + fSETHALF(0, VddV.v[1].uw[i], fGETUHALF(1, VvV.uw[i])); + fSETHALF(1, VddV.v[1].uw[i], fGETUHALF(1, VuV.uw[i]))) + +ITERATOR_INSN2_ANY_SLOT_DOUBLE_VEC(16,vshufoeb,"Vdd32=vshuffoeb(Vu32,Vv32)","Vdd32.b=vshuffoe(Vu32.b,Vv32.b)", +"Vector Shuffle bytes", + fSETBYTE(0, VddV.v[0].uh[i], fGETUBYTE(0, VvV.uh[i])); + fSETBYTE(1, VddV.v[0].uh[i], fGETUBYTE(0, VuV.uh[i])); + fSETBYTE(0, VddV.v[1].uh[i], fGETUBYTE(1, VvV.uh[i])); + fSETBYTE(1, VddV.v[1].uh[i], fGETUBYTE(1, VuV.uh[i]))) + + +/*************************************************************** +* Deal +***************************************************************/ + +ITERATOR_INSN2_PERMUTE_SLOT(32, vdealh, "Vd32=vdealh(Vu32)", "Vd32.h=vdeal(Vu32.h)", +"Deal Halfwords", + VdV.uh[i ] = fGETUHALF(0, VuV.uw[i]); + VdV.uh[i+fVELEM(32)] = fGETUHALF(1, VuV.uw[i])) + +ITERATOR_INSN2_PERMUTE_SLOT(16, vdealb, "Vd32=vdealb(Vu32)", "Vd32.b=vdeal(Vu32.b)", +"Deal Halfwords", + VdV.ub[i ] = fGETUBYTE(0, VuV.uh[i]); + VdV.ub[i+fVELEM(16)] = fGETUBYTE(1, VuV.uh[i])) + +ITERATOR_INSN2_PERMUTE_SLOT(32, vdealb4w, "Vd32=vdealb4w(Vu32,Vv32)", "Vd32.b=vdeale(Vu32.b,Vv32.b)", +"Deal Two Vectors Bytes", + VdV.ub[0+i ] = fGETUBYTE(0, VvV.uw[i]); + VdV.ub[fVELEM(32)+i ] = fGETUBYTE(2, VvV.uw[i]); + VdV.ub[2*fVELEM(32)+i] = fGETUBYTE(0, VuV.uw[i]); + VdV.ub[3*fVELEM(32)+i] = fGETUBYTE(2, VuV.uw[i])) + +/*************************************************************** +* shuffle +***************************************************************/ + +ITERATOR_INSN2_PERMUTE_SLOT(32, vshuffh, "Vd32=vshuffh(Vu32)", "Vd32.h=vshuff(Vu32.h)", +"Deal Halfwords", + fSETHALF(0, VdV.uw[i], VuV.uh[i]); + fSETHALF(1, VdV.uw[i], VuV.uh[i+fVELEM(32)])) + +ITERATOR_INSN2_PERMUTE_SLOT(16, vshuffb, "Vd32=vshuffb(Vu32)", "Vd32.b=vshuff(Vu32.b)", +"Deal Halfwords", + fSETBYTE(0, VdV.uh[i], VuV.ub[i]); + fSETBYTE(1, VdV.uh[i], VuV.ub[i+fVELEM(16)])) + + + + + +/*********************************************************** +* INSERT AND EXTRACT +*********************************************************/ +EXTINSN(V6_extractw, "Rd32=vextract(Vu32,Rs32)", +ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VA,A_MEMLIKE,A_RESTRICT_SLOT0ONLY), +"Extract an element from a vector to scalar", +fHIDE(warn("RdN=%d VuN=%d RsN=%d RsV=0x%08x widx=%d",RdN,VuN,RsN,RsV,((RsV & (fVBYTES()-1)) >> 2));) +RdV = VuV.uw[ (RsV & (fVBYTES()-1)) >> 2]; +fHIDE(warn("RdV=0x%08x",RdV);)) + +EXTINSN(V6_vinsertwr, "Vx32.w=vinsert(Rt32)", +ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VX), +"Insert Word Scalar into Vector", +VxV.uw[0] = RtV;) + + + + +ITERATOR_INSN_MPY_SLOT_LATE(32,lvsplatw, "Vd32=vsplat(Rt32)", "Replicates scalar accross words in 
vector", VdV.uw[i] = RtV) + +ITERATOR_INSN_MPY_SLOT_LATE(16,lvsplath, "Vd32.h=vsplat(Rt32)", "Replicates scalar accross halves in vector", VdV.uh[i] = RtV) + +ITERATOR_INSN_MPY_SLOT_LATE(8,lvsplatb, "Vd32.b=vsplat(Rt32)", "Replicates scalar accross bytes in vector", VdV.ub[i] = RtV) + + +ITERATOR_INSN_ANY_SLOT(32,vassign,"Vd32=Vu32","Copy a vector",VdV.w[i]=VuV.w[i]) + + +ITERATOR_INSN_ANY_SLOT_DOUBLE_VEC(8,vcombine,"Vdd32=vcombine(Vu32,Vv32)", +"Vector assign, Any two to Vector Pair", + VddV.v[0].ub[i] = VvV.ub[i]; + VddV.v[1].ub[i] = VuV.ub[i]) + + + +/////////////////////////////////////////////////////////////////////////// + + +/********************************************************* +* GENERAL PERMUTE NETWORKS +*********************************************************/ + + +EXTINSN(V6_vdelta, "Vd32=vdelta(Vu32,Vv32)", ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VP), +"Reverse Benes Butterfly network ", +{ + fHIDE(int offset;) + fHIDE(int k;) + fHIDE(mmvector_t tmp;) + tmp = VuV; + for (offset=fVBYTES(); (offset>>=1)>0; ) { + for (k = 0; k>3; \ + unsigned char element = value & 7; \ + READ_EXT_VREG(regno,tmp,0); \ + tmp.uh[(128/16)*lane+(element)]++; \ + WRITE_EXT_VREG(regno,tmp,EXT_NEW); \ + } \ + } + +#define fHISTQ(INPUTVEC,QVAL) \ + fUARCH_NOTE_PUMP_4X(); \ + fHIDE(int lane;) \ + fHIDE(mmvector_t tmp;) \ + fVFOREACH(128, lane) { \ + for (fHIDE(int )i=0; i<128/8; ++i) { \ + unsigned char value = INPUTVEC.ub[(128/8)*lane+i]; \ + unsigned char regno = value>>3; \ + unsigned char element = value & 7; \ + READ_EXT_VREG(regno,tmp,0); \ + if (fGETQBIT(QVAL,128/8*lane+i)) tmp.uh[(128/16)*lane+(element)]++; \ + WRITE_EXT_VREG(regno,tmp,EXT_NEW); \ + } \ + } + + + +EXTINSN(V6_vhist, "vhist",ATTRIBS(A_EXTENSION,A_CVI,A_CVI_4SLOT), "vhist instruction",{ fHIDE(mmvector_t inputVec;) inputVec=fTMPVDATA(); fHIST(inputVec); }) +EXTINSN(V6_vhistq, "vhist(Qv4)",ATTRIBS(A_EXTENSION,A_CVI,A_CVI_4SLOT), "vhist instruction",{ fHIDE(mmvector_t inputVec;) inputVec=fTMPVDATA(); fHISTQ(inputVec,QvV); }) + +#undef fHIST +#undef fHISTQ + + +/* **** WEIGHTED HISTOGRAM **** */ + + +#if 1 +#define WHIST(EL,MASK,BSHIFT,COND,SATF) \ + fHIDE(unsigned int) bucket = fGETUBYTE(0,input.h[i]); \ + fHIDE(unsigned int) weight = fGETUBYTE(1,input.h[i]); \ + fHIDE(unsigned int) vindex = (bucket >> 3) & 0x1F; \ + fHIDE(unsigned int) elindex = ((i>>BSHIFT) & (~MASK)) | ((bucket>>BSHIFT) & MASK); \ + fHIDE(mmvector_t tmp;) \ + READ_EXT_VREG(vindex,tmp,0); \ + COND tmp.EL[elindex] = SATF(tmp.EL[elindex] + weight); \ + WRITE_EXT_VREG(vindex,tmp,EXT_NEW); \ + fUARCH_NOTE_PUMP_2X(); + +ITERATOR_INSN_VHISTLIKE(16,vwhist256,"vwhist256","vector weighted histogram halfword counters", WHIST(uh,7,0,,)) +ITERATOR_INSN_VHISTLIKE(16,vwhist256q,"vwhist256(Qv4)","vector weighted histogram halfword counters", WHIST(uh,7,0,if (fGETQBIT(QvV,2*i)),)) +ITERATOR_INSN_VHISTLIKE(16,vwhist256_sat,"vwhist256:sat","vector weighted histogram halfword counters", WHIST(uh,7,0,,fVSATUH)) +ITERATOR_INSN_VHISTLIKE(16,vwhist256q_sat,"vwhist256(Qv4):sat","vector weighted histogram halfword counters", WHIST(uh,7,0,if (fGETQBIT(QvV,2*i)),fVSATUH)) +ITERATOR_INSN_VHISTLIKE(16,vwhist128,"vwhist128","vector weighted histogram word counters", WHIST(uw,3,1,,)) +ITERATOR_INSN_VHISTLIKE(16,vwhist128q,"vwhist128(Qv4)","vector weighted histogram word counters", WHIST(uw,3,1,if (fGETQBIT(QvV,2*i)),)) +ITERATOR_INSN_VHISTLIKE(16,vwhist128m,"vwhist128(#u1)","vector weighted histogram word counters", WHIST(uw,3,1,if ((bucket & 1) == uiV),)) 
+ITERATOR_INSN_VHISTLIKE(16,vwhist128qm,"vwhist128(Qv4,#u1)","vector weighted histogram word counters", WHIST(uw,3,1,if (((bucket & 1) == uiV) && fGETQBIT(QvV,2*i)),)) + + +#endif + + + +/* ****** lookup table instructions *********** */ + +/* Use low bits from idx to choose next-bigger elements from vector, then use LSB from idx to choose odd or even element */ + +ITERATOR_INSN_PERMUTE_SLOT(8,vlutvvb,"Vd32.b=vlut32(Vu32.b,Vv32.b,Rt8)","vector-vector table lookup", +fHIDE(unsigned int idx;) fHIDE(int matchval;) fHIDE(int oddhalf;) +matchval = RtV & 0x7; +oddhalf = (RtV >> (fVECLOGSIZE()-6)) & 0x1; +idx = VuV.ub[i]; +VdV.b[i] = ((idx & 0xE0) == (matchval << 5)) ? fGETBYTE(oddhalf,VvV.h[idx % fVELEM(16)]) : 0) + + +ITERATOR_INSN_PERMUTE_SLOT_DOUBLE_VEC(8,vlutvvb_oracc,"Vx32.b|=vlut32(Vu32.b,Vv32.b,Rt8)","vector-vector table lookup", +fHIDE(unsigned int idx;) fHIDE(int matchval;) fHIDE(int oddhalf;) +matchval = RtV & 0x7; +oddhalf = (RtV >> (fVECLOGSIZE()-6)) & 0x1; +idx = VuV.ub[i]; +VxV.b[i] |= ((idx & 0xE0) == (matchval << 5)) ? fGETBYTE(oddhalf,VvV.h[idx % fVELEM(16)]) : 0) + +ITERATOR_INSN_PERMUTE_SLOT_DOUBLE_VEC(16,vlutvwh,"Vdd32.h=vlut16(Vu32.b,Vv32.h,Rt8)","vector-vector table lookup", +fHIDE(unsigned int idx;) fHIDE(int matchval;) fHIDE(int oddhalf;) +matchval = RtV & 0xF; +oddhalf = (RtV >> (fVECLOGSIZE()-6)) & 0x1; +idx = fGETUBYTE(0,VuV.uh[i]); +VddV.v[0].h[i] = ((idx & 0xF0) == (matchval << 4)) ? fGETHALF(oddhalf,VvV.w[idx % fVELEM(32)]) : 0; +idx = fGETUBYTE(1,VuV.uh[i]); +VddV.v[1].h[i] = ((idx & 0xF0) == (matchval << 4)) ? fGETHALF(oddhalf,VvV.w[idx % fVELEM(32)]) : 0) + +ITERATOR_INSN_PERMUTE_SLOT_DOUBLE_VEC(16,vlutvwh_oracc,"Vxx32.h|=vlut16(Vu32.b,Vv32.h,Rt8)","vector-vector table lookup", +fHIDE(unsigned int idx;) fHIDE(int matchval;) fHIDE(int oddhalf;) +matchval = fGETUBYTE(0,RtV) & 0xF; +oddhalf = (RtV >> (fVECLOGSIZE()-6)) & 0x1; +idx = fGETUBYTE(0,VuV.uh[i]); +VxxV.v[0].h[i] |= ((idx & 0xF0) == (matchval << 4)) ? fGETHALF(oddhalf,VvV.w[idx % fVELEM(32)]) : 0; +idx = fGETUBYTE(1,VuV.uh[i]); +VxxV.v[1].h[i] |= ((idx & 0xF0) == (matchval << 4)) ? fGETHALF(oddhalf,VvV.w[idx % fVELEM(32)]) : 0) + +ITERATOR_INSN_PERMUTE_SLOT(8,vlutvvbi,"Vd32.b=vlut32(Vu32.b,Vv32.b,#u3)","vector-vector table lookup", +fHIDE(unsigned int idx;) fHIDE(int matchval;) fHIDE(int oddhalf;) +matchval = uiV & 0x7; +oddhalf = (uiV >> (fVECLOGSIZE()-6)) & 0x1; +idx = VuV.ub[i]; +VdV.b[i] = ((idx & 0xE0) == (matchval << 5)) ? fGETBYTE(oddhalf,VvV.h[idx % fVELEM(16)]) : 0) + + +ITERATOR_INSN_PERMUTE_SLOT_DOUBLE_VEC(8,vlutvvb_oracci,"Vx32.b|=vlut32(Vu32.b,Vv32.b,#u3)","vector-vector table lookup", +fHIDE(unsigned int idx;) fHIDE(int matchval;) fHIDE(int oddhalf;) +matchval = uiV & 0x7; +oddhalf = (uiV >> (fVECLOGSIZE()-6)) & 0x1; +idx = VuV.ub[i]; +VxV.b[i] |= ((idx & 0xE0) == (matchval << 5)) ? fGETBYTE(oddhalf,VvV.h[idx % fVELEM(16)]) : 0) + +ITERATOR_INSN_PERMUTE_SLOT_DOUBLE_VEC(16,vlutvwhi,"Vdd32.h=vlut16(Vu32.b,Vv32.h,#u3)","vector-vector table lookup", +fHIDE(unsigned int idx;) fHIDE(int matchval;) fHIDE(int oddhalf;) +matchval = uiV & 0xF; +oddhalf = (uiV >> (fVECLOGSIZE()-6)) & 0x1; +idx = fGETUBYTE(0,VuV.uh[i]); +VddV.v[0].h[i] = ((idx & 0xF0) == (matchval << 4)) ? fGETHALF(oddhalf,VvV.w[idx % fVELEM(32)]) : 0; +idx = fGETUBYTE(1,VuV.uh[i]); +VddV.v[1].h[i] = ((idx & 0xF0) == (matchval << 4)) ? 
fGETHALF(oddhalf,VvV.w[idx % fVELEM(32)]) : 0) + +ITERATOR_INSN_PERMUTE_SLOT_DOUBLE_VEC(16,vlutvwh_oracci,"Vxx32.h|=vlut16(Vu32.b,Vv32.h,#u3)","vector-vector table lookup", +fHIDE(unsigned int idx;) fHIDE(int matchval;) fHIDE(int oddhalf;) +matchval = uiV & 0xF; +oddhalf = (uiV >> (fVECLOGSIZE()-6)) & 0x1; +idx = fGETUBYTE(0,VuV.uh[i]); +VxxV.v[0].h[i] |= ((idx & 0xF0) == (matchval << 4)) ? fGETHALF(oddhalf,VvV.w[idx % fVELEM(32)]) : 0; +idx = fGETUBYTE(1,VuV.uh[i]); +VxxV.v[1].h[i] |= ((idx & 0xF0) == (matchval << 4)) ? fGETHALF(oddhalf,VvV.w[idx % fVELEM(32)]) : 0) + +ITERATOR_INSN_PERMUTE_SLOT(8,vlutvvb_nm,"Vd32.b=vlut32(Vu32.b,Vv32.b,Rt8):nomatch","vector-vector table lookup", +fHIDE(unsigned int idx;) fHIDE(int oddhalf;) fHIDE(int matchval;) + matchval = RtV & 0x7; + oddhalf = (RtV >> (fVECLOGSIZE()-6)) & 0x1; + idx = VuV.ub[i]; + idx = (idx&0x1F) | (matchval<<5); + VdV.b[i] = fGETBYTE(oddhalf,VvV.h[idx % fVELEM(16)])) + +ITERATOR_INSN_PERMUTE_SLOT_DOUBLE_VEC(16,vlutvwh_nm,"Vdd32.h=vlut16(Vu32.b,Vv32.h,Rt8):nomatch","vector-vector table lookup", +fHIDE(unsigned int idx;) fHIDE(int oddhalf;) fHIDE(int matchval;) + matchval = RtV & 0xF; + oddhalf = (RtV >> (fVECLOGSIZE()-6)) & 0x1; + idx = fGETUBYTE(0,VuV.uh[i]); + idx = (idx&0x0F) | (matchval<<4); + VddV.v[0].h[i] = fGETHALF(oddhalf,VvV.w[idx % fVELEM(32)]); + idx = fGETUBYTE(1,VuV.uh[i]); + idx = (idx&0x0F) | (matchval<<4); + VddV.v[1].h[i] = fGETHALF(oddhalf,VvV.w[idx % fVELEM(32)])) + + + + +/****************************************************************************** +NON LINEAR - V65 + ******************************************************************************/ + +ITERATOR_INSN_SLOT2_DOUBLE_VEC(16,vmpahhsat,"Vx32.h=vmpa(Vx32.h,Vu32.h,Rtt32.h):sat","piecewise linear approximation", + VxV.h[i]= fVSATH( ( ( fMPY16SS(VxV.h[i],VuV.h[i])<<1) + (fGETHALF(( (VuV.h[i]>>14)&0x3), RttV )<<15))>>16)) + + +ITERATOR_INSN_SLOT2_DOUBLE_VEC(16,vmpauhuhsat,"Vx32.h=vmpa(Vx32.h,Vu32.uh,Rtt32.uh):sat","piecewise linear approximation", + VxV.h[i]= fVSATH( ( fMPY16SU(VxV.h[i],VuV.uh[i]) + (fGETUHALF(((VuV.uh[i]>>14)&0x3), RttV )<<15))>>16)) + +ITERATOR_INSN_SLOT2_DOUBLE_VEC(16,vmpsuhuhsat,"Vx32.h=vmps(Vx32.h,Vu32.uh,Rtt32.uh):sat","piecewise linear approximation", + VxV.h[i]= fVSATH( ( fMPY16SU(VxV.h[i],VuV.uh[i]) - (fGETUHALF(((VuV.uh[i]>>14)&0x3), RttV )<<15))>>16)) + + +ITERATOR_INSN_SLOT2_DOUBLE_VEC(16,vlut4,"Vd32.h=vlut4(Vu32.uh,Rtt32.h)","4 entry lookup table", + VdV.h[i]= fGETHALF( ((VuV.h[i]>>14)&0x3), RttV )) + + + +/****************************************************************************** +V65 + ******************************************************************************/ + +ITERATOR_INSN_MPY_SLOT_NOV1(32,vmpyuhe,"Vd32.uw=vmpye(Vu32.uh,Rt32.uh)", +"Vector even halfword unsigned multiply by scalar", + VdV.uw[i] = fMPY16UU(fGETUHALF(0, VuV.uw[i]),fGETUHALF(0,RtV))) + + +ITERATOR_INSN_MPY_SLOT_NOV1(32,vmpyuhe_acc,"Vx32.uw+=vmpye(Vu32.uh,Rt32.uh)", +"Vector even halfword unsigned multiply by scalar", + VxV.uw[i] += fMPY16UU(fGETUHALF(0, VuV.uw[i]),fGETUHALF(0,RtV))) + + + + +EXTINSN(V6_vgathermw, "vtmp.w=vgather(Rt32,Mu2,Vv32.w).w", ATTRIBS(A_EXTENSION,A_CVI,A_CVI_GATHER,A_CVI_VA,A_CVI_VM,A_CVI_TMP_DST,A_MEMLIKE), "Gather Words", +{ + fHIDE(int i;) + fHIDE(int element_size = 4;) + fHIDE(fGATHER_INIT( RtV, MuV, element_size);) + fVLASTBYTE(MuV, element_size); + fVALIGN(RtV, element_size); + fVFOREACH(32, i) { + EA = RtV+VvV.uw[i]; + fVLOG_VTCM_GATHER_WORD(EA, VvV.uw[i], i,MuV); + } + fGATHER_FINISH() +}) +EXTINSN(V6_vgathermh, 
"vtmp.h=vgather(Rt32,Mu2,Vv32.h).h", ATTRIBS(A_EXTENSION,A_CVI,A_CVI_GATHER,A_CVI_VA,A_CVI_VM,A_CVI_TMP_DST,A_MEMLIKE), "Gather halfwords", +{ + fHIDE(int i;) + fHIDE(int element_size = 2;) + fHIDE(fGATHER_INIT( RtV, MuV, element_size);) + fVLASTBYTE(MuV, element_size); + fVALIGN(RtV, element_size); + fVFOREACH(16, i) { + EA = RtV+VvV.uh[i]; + fVLOG_VTCM_GATHER_HALFWORD(EA, VvV.uh[i], i,MuV); + } + fGATHER_FINISH() +}) + + + +EXTINSN(V6_vgathermhw, "vtmp.h=vgather(Rt32,Mu2,Vvv32.w).h", ATTRIBS(A_EXTENSION,A_CVI,A_CVI_GATHER,A_CVI_VA_DV,A_CVI_VM,A_CVI_TMP_DST,A_MEMLIKE), "Gather halfwords", +{ + fHIDE(int i;) + fHIDE(int j;) + fHIDE(int element_size = 2;) + fHIDE(fGATHER_INIT( RtV, MuV, element_size);) + fVLASTBYTE(MuV, element_size); + fVALIGN(RtV, element_size); + fVFOREACH(32, i) { + for(j = 0; j < 2; j++) { + EA = RtV+VvvV.v[j].uw[i]; + fVLOG_VTCM_GATHER_HALFWORD_DV(EA, VvvV.v[j].uw[i], (2*i+j),i,j,MuV); + } + } + fGATHER_FINISH() +}) + + +EXTINSN(V6_vgathermwq, "if (Qs4) vtmp.w=vgather(Rt32,Mu2,Vv32.w).w", ATTRIBS(A_EXTENSION,A_CVI,A_CVI_GATHER,A_CVI_VA,A_CVI_VM,A_CVI_TMP_DST,A_MEMLIKE), "Gather Words", +{ + fHIDE(int i;) + fHIDE(int element_size = 4;) + fHIDE(fGATHER_INIT( RtV, MuV, element_size);) + fVLASTBYTE(MuV, element_size); + fVALIGN(RtV, element_size); + fVFOREACH(32, i) { + EA = RtV+VvV.uw[i]; + fVLOG_VTCM_GATHER_WORDQ(EA, VvV.uw[i], i,QsV,MuV); + } + fGATHER_FINISH() +}) +EXTINSN(V6_vgathermhq, "if (Qs4) vtmp.h=vgather(Rt32,Mu2,Vv32.h).h", ATTRIBS(A_EXTENSION,A_CVI,A_CVI_GATHER,A_CVI_VA,A_CVI_VM,A_CVI_TMP_DST,A_MEMLIKE), "Gather halfwords", +{ + fHIDE(int i;) + fHIDE(int element_size = 2;) + fHIDE(fGATHER_INIT( RtV, MuV, element_size);) + fVLASTBYTE(MuV, element_size); + fVALIGN(RtV, element_size); + fVFOREACH(16, i) { + EA = RtV+VvV.uh[i]; + fVLOG_VTCM_GATHER_HALFWORDQ(EA, VvV.uh[i], i,QsV,MuV); + } + fGATHER_FINISH() +}) + + + +EXTINSN(V6_vgathermhwq, "if (Qs4) vtmp.h=vgather(Rt32,Mu2,Vvv32.w).h", ATTRIBS(A_EXTENSION,A_CVI,A_CVI_GATHER,A_CVI_VA_DV,A_CVI_VM,A_CVI_TMP_DST,A_MEMLIKE), "Gather halfwords", +{ + fHIDE(int i;) + fHIDE(int j;) + fHIDE(int element_size = 2;) + fHIDE(fGATHER_INIT( RtV, MuV, element_size);) + fVLASTBYTE(MuV, element_size); + fVALIGN(RtV, element_size); + fVFOREACH(32, i) { + for(j = 0; j < 2; j++) { + EA = RtV+VvvV.v[j].uw[i]; + fVLOG_VTCM_GATHER_HALFWORDQ_DV(EA, VvvV.v[j].uw[i], (2*i+j),i,j,QsV,MuV); + } + } + fGATHER_FINISH() +}) + + + +EXTINSN(V6_vscattermw , "vscatter(Rt32,Mu2,Vv32.w).w=Vw32", ATTRIBS(A_EXTENSION,A_CVI,A_CVI_SCATTER,A_CVI_VA,A_CVI_VM,A_MEMLIKE), "Scatter Words", +{ + fHIDE(int i;) + fHIDE(int element_size = 4;) + fHIDE(fSCATTER_INIT( RtV, MuV, element_size);) + fVLASTBYTE(MuV, element_size); + fVALIGN(RtV, element_size); + fVFOREACH(32, i) { + EA = RtV+VvV.uw[i]; + fVLOG_VTCM_WORD(EA, VvV.uw[i], VwV,i,MuV); + } + fSCATTER_FINISH(0) +}) + + + +EXTINSN(V6_vscattermh , "vscatter(Rt32,Mu2,Vv32.h).h=Vw32", ATTRIBS(A_EXTENSION,A_CVI,A_CVI_SCATTER,A_CVI_VA,A_CVI_VM,A_MEMLIKE), "Scatter halfWords", +{ + fHIDE(int i;) + fHIDE(int element_size = 2;) + fHIDE(fSCATTER_INIT( RtV, MuV, element_size);) + fVLASTBYTE(MuV, element_size); + fVALIGN(RtV, element_size); + fVFOREACH(16, i) { + EA = RtV+VvV.uh[i]; + fVLOG_VTCM_HALFWORD(EA,VvV.uh[i],VwV,i,MuV); + } + fSCATTER_FINISH(0) +}) + + +EXTINSN(V6_vscattermw_add, "vscatter(Rt32,Mu2,Vv32.w).w+=Vw32", ATTRIBS(A_EXTENSION,A_CVI,A_CVI_SCATTER,A_CVI_VA,A_CVI_VM,A_MEMLIKE), "Scatter Words-Add", +{ + fHIDE(int i;) + fHIDE(int ALIGNMENT=4;) + fHIDE(int element_size = 4;) + fHIDE(fSCATTER_INIT( RtV, MuV, 
element_size);) + fVLASTBYTE(MuV, element_size); + fVALIGN(RtV, element_size); + fVFOREACH(32, i) { + EA = (RtV+fVALIGN(VvV.uw[i],ALIGNMENT)); + fVLOG_VTCM_WORD_INCREMENT(EA,VvV.uw[i],VwV,i,ALIGNMENT,MuV); + } + fHIDE(fLOG_SCATTER_OP(4);) + fSCATTER_FINISH(1) +}) + +EXTINSN(V6_vscattermh_add, "vscatter(Rt32,Mu2,Vv32.h).h+=Vw32", ATTRIBS(A_EXTENSION,A_CVI,A_CVI_SCATTER,A_CVI_VA,A_CVI_VM,A_MEMLIKE), "Scatter halfword-Add", +{ + fHIDE(int i;) + fHIDE(int ALIGNMENT=2;) + fHIDE(int element_size = 2;) + fHIDE(fSCATTER_INIT( RtV, MuV, element_size);) + fVLASTBYTE(MuV, element_size); + fVALIGN(RtV, element_size); + fVFOREACH(16, i) { + EA = (RtV+fVALIGN(VvV.uh[i],ALIGNMENT)); + fVLOG_VTCM_HALFWORD_INCREMENT(EA,VvV.uh[i],VwV,i,ALIGNMENT,MuV); + } + fHIDE(fLOG_SCATTER_OP(2);) + fSCATTER_FINISH(1) +}) + + +EXTINSN(V6_vscattermwq, "if (Qs4) vscatter(Rt32,Mu2,Vv32.w).w=Vw32", ATTRIBS(A_EXTENSION,A_CVI,A_CVI_SCATTER,A_CVI_VA,A_CVI_VM,A_MEMLIKE), "Scatter Words conditional", +{ + fHIDE(int i;) + fHIDE(int element_size = 4;) + fHIDE(fSCATTER_INIT( RtV, MuV, element_size);) + fVLASTBYTE(MuV, element_size); + fVALIGN(RtV, element_size); + fVFOREACH(32, i) { + EA = RtV+VvV.uw[i]; + fVLOG_VTCM_WORDQ(EA,VvV.uw[i], VwV,i,QsV,MuV); + } + fSCATTER_FINISH(0) +}) + +EXTINSN(V6_vscattermhq, "if (Qs4) vscatter(Rt32,Mu2,Vv32.h).h=Vw32", ATTRIBS(A_EXTENSION,A_CVI,A_CVI_SCATTER,A_CVI_VA,A_CVI_VM,A_MEMLIKE), "Scatter HalfWords conditional", +{ + fHIDE(int i;) + fHIDE(int element_size = 2;) + fHIDE(fSCATTER_INIT( RtV, MuV, element_size);) + fVLASTBYTE(MuV, element_size); + fVALIGN(RtV, element_size); + fVFOREACH(16, i) { + EA = RtV+VvV.uh[i]; + fVLOG_VTCM_HALFWORDQ(EA,VvV.uh[i],VwV,i,QsV,MuV); + } + fSCATTER_FINISH(0) +}) + + + + +EXTINSN(V6_vscattermhw , "vscatter(Rt32,Mu2,Vvv32.w).h=Vw32", ATTRIBS(A_EXTENSION,A_CVI,A_CVI_SCATTER,A_CVI_VA_DV,A_CVI_VM,A_MEMLIKE), "Scatter Words", +{ + fHIDE(int i;) + fHIDE(int j;) + fHIDE(int element_size = 2;) + fHIDE(fSCATTER_INIT( RtV, MuV, element_size);) + fVLASTBYTE(MuV, element_size); + fVALIGN(RtV, element_size); + fVFOREACH(32, i) { + for(j = 0; j < 2; j++) { + EA = RtV+VvvV.v[j].uw[i]; + fVLOG_VTCM_HALFWORD_DV(EA,VvvV.v[j].uw[i],VwV,(2*i+j),i,j,MuV); + } + } + fSCATTER_FINISH(0) +}) + + + +EXTINSN(V6_vscattermhwq, "if (Qs4) vscatter(Rt32,Mu2,Vvv32.w).h=Vw32", ATTRIBS(A_EXTENSION,A_CVI,A_CVI_SCATTER,A_CVI_VA_DV,A_CVI_VM,A_MEMLIKE), "Scatter halfwords conditional", +{ + fHIDE(int i;) + fHIDE(int j;) + fHIDE(int element_size = 2;) + fHIDE(fSCATTER_INIT( RtV, MuV, element_size);) + fVLASTBYTE(MuV, element_size); + fVALIGN(RtV, element_size); + fVFOREACH(32, i) { + for(j = 0; j < 2; j++) { + EA = RtV+VvvV.v[j].uw[i]; + fVLOG_VTCM_HALFWORDQ_DV(EA,VvvV.v[j].uw[i],VwV,(2*i+j),QsV,i,j,MuV); + } + } + fSCATTER_FINISH(0) +}) + +EXTINSN(V6_vscattermhw_add, "vscatter(Rt32,Mu2,Vvv32.w).h+=Vw32", ATTRIBS(A_EXTENSION,A_CVI,A_CVI_SCATTER,A_CVI_VA_DV,A_CVI_VM,A_MEMLIKE), "Scatter halfwords-add", +{ + fHIDE(int i;) + fHIDE(int j;) + fHIDE(int ALIGNMENT=2;) + fHIDE(int element_size = 2;) + fHIDE(fSCATTER_INIT( RtV, MuV, element_size);) + fVLASTBYTE(MuV, element_size); + fVALIGN(RtV, element_size); + fVFOREACH(32, i) { + for(j = 0; j < 2; j++) { + EA = RtV + fVALIGN(VvvV.v[j].uw[i],ALIGNMENT);; + fVLOG_VTCM_HALFWORD_INCREMENT_DV(EA,VvvV.v[j].uw[i],VwV,(2*i+j),i,j,ALIGNMENT,MuV); + } + } + fHIDE(fLOG_SCATTER_OP(2);) + fSCATTER_FINISH(1) +}) + +EXTINSN(V6_vprefixqb,"Vd32.b=prefixsum(Qv4)", ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VS), "parallel prefix sum of Q into byte", +{ + fHIDE(int i;) + fHIDE(size1u_t 
acc = 0;) + fVFOREACH(8, i) { + acc += fGETQBIT(QvV,i); + VdV.ub[i] = acc; + } + } ) +EXTINSN(V6_vprefixqh,"Vd32.h=prefixsum(Qv4)", ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VS), "parallel prefix sum of Q into halfwords", +{ + fHIDE(int i;) + fHIDE(size2u_t acc = 0;) + fVFOREACH(16, i) { + acc += fGETQBIT(QvV,i*2+0); + acc += fGETQBIT(QvV,i*2+1); + VdV.uh[i] = acc; + } + } ) +EXTINSN(V6_vprefixqw,"Vd32.w=prefixsum(Qv4)", ATTRIBS(A_EXTENSION,A_CVI,A_CVI_VS), "parallel prefix sum of Q into words", +{ + fHIDE(int i;) + fHIDE(size4u_t acc = 0;) + fVFOREACH(32, i) { + acc += fGETQBIT(QvV,i*4+0); + acc += fGETQBIT(QvV,i*4+1); + acc += fGETQBIT(QvV,i*4+2); + acc += fGETQBIT(QvV,i*4+3); + VdV.uw[i] = acc; + } + } ) + + + + + +/****************************************************************************** + DEBUG Vector/Register Printing + ******************************************************************************/ + +#define PRINT_VU(TYPE, TYPE2, COUNT)\ + int i; \ + size4u_t vec_len = fVBYTES();\ + fprintf(stdout,"V%2d: ",VuN); \ + for (i=0;i>COUNT;i++) { \ + fprintf(stdout,TYPE2 " ", VuV.TYPE[i]); \ + }; \ + fprintf(stdout,"\\n"); \ + fflush(stdout);\ + +#undef ATTR_VMEM +#undef ATTR_VMEMU +#undef ATTR_VMEM_NT + +#endif /* NO_MMVEC */ + +#ifdef __SELF_DEF_EXTINSN +#undef EXTINSN +#undef __SELF_DEF_EXTINSN +#endif diff --git a/target/hexagon/imported/mmvec/macros.def b/target/hexagon/imported/mmvec/macros.def new file mode 100755 index 0000000000..7e5438a998 --- /dev/null +++ b/target/hexagon/imported/mmvec/macros.def @@ -0,0 +1,842 @@ +/* + * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ + +DEF_MACRO(fDUMPQ, + do { + printf(STR ":" #REG ": 0x%016llx\n",REG.ud[0]); + } while (0), + () +) + +DEF_MACRO(fUSE_LOOKUP_ADDRESS_BY_REV, + PROC->arch_proc_options->mmvec_use_full_va_for_lookup, + () +) + +DEF_MACRO(fUSE_LOOKUP_ADDRESS, + 1, + () +) + +DEF_MACRO(fNOTQ, + ({mmqreg_t _ret = {0}; int _i_; for (_i_ = 0; _i_ < fVECSIZE()/64; _i_++) _ret.ud[_i_] = ~VAL.ud[_i_]; _ret;}), + () +) + +DEF_MACRO(fGETQBITS, + ((MASK) & (REG.w[(BITNO)>>5] >> ((BITNO) & 0x1f))), + () +) + +DEF_MACRO(fGETQBIT, + fGETQBITS(REG,1,1,BITNO), + () +) + +DEF_MACRO(fGENMASKW, + (((fGETQBIT(QREG,(IDX*4+0)) ? 0xFF : 0x0) << 0) + |((fGETQBIT(QREG,(IDX*4+1)) ? 0xFF : 0x0) << 8) + |((fGETQBIT(QREG,(IDX*4+2)) ? 0xFF : 0x0) << 16) + |((fGETQBIT(QREG,(IDX*4+3)) ? 0xFF : 0x0) << 24)), + () +) +DEF_MACRO(fGET10BIT, + { + COE = (((((fGETUBYTE(3,VAL) >> (2 * POS)) & 3) << 8) | fGETUBYTE(POS,VAL)) << 6); + COE >>= 6; + }, + () +) + +DEF_MACRO(fVMAX, + (X>Y) ? X : Y, + () +) + + +DEF_MACRO(fGETNIBBLE, + ( fSXTN(4,8,(SRC >> (4*IDX)) & 0xF) ), + () +) + +DEF_MACRO(fGETCRUMB, + ( fSXTN(2,8,(SRC >> (2*IDX)) & 0x3) ), + () +) + +DEF_MACRO(fGETCRUMB_SYMMETRIC, + ( (fGETCRUMB(IDX,SRC)>=0 ? 
(2-fGETCRUMB(IDX,SRC)) : fGETCRUMB(IDX,SRC) ) ), + () +) + +#define ZERO_OFFSET_2B + + +DEF_MACRO(fGENMASKH, + (((fGETQBIT(QREG,(IDX*2+0)) ? 0xFF : 0x0) << 0) + |((fGETQBIT(QREG,(IDX*2+1)) ? 0xFF : 0x0) << 8)), + () +) + +DEF_MACRO(fGETMASKW, + (VREG.w[IDX] & fGENMASKW((QREG),IDX)), + () +) + +DEF_MACRO(fGETMASKH, + (VREG.h[IDX] & fGENMASKH((QREG),IDX)), + () +) + +DEF_MACRO(fCONDMASK8, + (fGETQBIT(QREG,IDX) ? (YESVAL) : (NOVAL)), + () +) + +DEF_MACRO(fCONDMASK16, + ((fGENMASKH(QREG,IDX) & (YESVAL)) | (fGENMASKH(fNOTQ(QREG),IDX) & (NOVAL))), + () +) + +DEF_MACRO(fCONDMASK32, + ((fGENMASKW(QREG,IDX) & (YESVAL)) | (fGENMASKW(fNOTQ(QREG),IDX) & (NOVAL))), + () +) + + +DEF_MACRO(fSETQBITS, + do { + size4u_t __TMP = (VAL); + REG.w[(BITNO)>>5] &= ~((MASK) << ((BITNO) & 0x1f)); + REG.w[(BITNO)>>5] |= (((__TMP) & (MASK)) << ((BITNO) & 0x1f)); + } while (0), + () +) + +DEF_MACRO(fSETQBIT, + fSETQBITS(REG,1,1,BITNO,VAL), + () +) + +DEF_MACRO(fVBYTES, + (fVECSIZE()), + () +) + +DEF_MACRO(fVHALVES, + (fVECSIZE()/2), + () +) + +DEF_MACRO(fVWORDS, + (fVECSIZE()/4), + () +) + +DEF_MACRO(fVDWORDS, + (fVECSIZE()/8), + () +) + +DEF_MACRO(fVALIGN, + ( ADDR = ADDR & ~(LOG2_ALIGNMENT-1)), + () +) + +DEF_MACRO(fVLASTBYTE, + ( ADDR = ADDR | (LOG2_ALIGNMENT-1)), + () +) + + +DEF_MACRO(fVELEM, + ((fVECSIZE()*8)/WIDTH), + () +) + +DEF_MACRO(fVECLOGSIZE, + (mmvec_current_veclogsize(thread)), + () +) + +DEF_MACRO(fVECSIZE, + (1<VRegs_updated & (((VRegMask)1)<future_VRegs[VNUM] : mmvec_zero_vector()), + (A_DOTNEWVALUE,A_RESTRICT_SLOT0ONLY) +) + +DEF_MACRO( + fV_AL_CHECK, + if ((EA) & (MASK)) { + warn("aligning misaligned vector. PC=%08x EA=%08x",thread->Regs[REG_PC],(EA)); + }, + () +) +DEF_MACRO(fSCATTER_INIT, + { + mem_vector_scatter_init(thread, insn, REGION_START, LENGTH, ELEMENT_SIZE); + if (EXCEPTION_DETECTED) return; + }, + (A_STORE,A_MEMLIKE,A_RESTRICT_SLOT0ONLY) +) + +DEF_MACRO(fGATHER_INIT, + { + mem_vector_gather_init(thread, insn, REGION_START, LENGTH, ELEMENT_SIZE); + if (EXCEPTION_DETECTED) return; + }, + (A_LOAD,A_MEMLIKE,A_RESTRICT_SLOT1ONLY) +) + +DEF_MACRO(fSCATTER_FINISH, + { + if (EXCEPTION_DETECTED) return; + mem_vector_scatter_finish(thread, insn, OP); + }, + () +) + +DEF_MACRO(fGATHER_FINISH, + { + if (EXCEPTION_DETECTED) return; + mem_vector_gather_finish(thread, insn); + }, + () +) + + +DEF_MACRO(CHECK_VTCM_PAGE, + { + int slot = insn->slot; + paddr_t pa = thread->mem_access[slot].paddr+OFFSET; + pa = pa & ~(ALIGNMENT-1); + FLAG = (pa < (thread->mem_access[slot].paddr+LENGTH)); + }, + () +) +DEF_MACRO(COUNT_OUT_OF_BOUNDS, + { + if (!FLAG) + { + THREAD2STRUCT->vtcm_log.oob_access += SIZE; + warn("Scatter/Gather out of bounds of region"); + } + }, + () +) + +DEF_MACRO(fLOG_SCATTER_OP, + { + // Log the size and indicate that the extension ext.c file needs to increment right before memory write + THREAD2STRUCT->vtcm_log.op = 1; + THREAD2STRUCT->vtcm_log.op_size = SIZE; + }, + () +) + + + +DEF_MACRO(fVLOG_VTCM_WORD_INCREMENT, + { + int slot = insn->slot; + int log_bank = 0; + int log_byte =0; + paddr_t pa = thread->mem_access[slot].paddr+(OFFSET & ~(ALIGNMENT-1)); + paddr_t pa_high = thread->mem_access[slot].paddr+LEN; + for(int i0 = 0; i0 < 4; i0++) + { + log_byte = ((OFFSET>=0)&&((pa+i0)<=pa_high)); + log_bank |= (log_byte<slot; + int log_bank = 0; + int log_byte = 0; + paddr_t pa = thread->mem_access[slot].paddr+(OFFSET & ~(ALIGNMENT-1)); + paddr_t pa_high = thread->mem_access[slot].paddr+LEN; + for(int i0 = 0; i0 < 2; i0++) { + log_byte = ((OFFSET>=0)&&((pa+i0)<=pa_high)); + log_bank |= 
(log_byte<slot; + int log_bank = 0; + int log_byte = 0; + paddr_t pa = thread->mem_access[slot].paddr+(OFFSET & ~(ALIGNMENT-1)); + paddr_t pa_high = thread->mem_access[slot].paddr+LEN; + for(int i0 = 0; i0 < 2; i0++) { + log_byte = ((OFFSET>=0)&&((pa+i0)<=pa_high)); + log_bank |= (log_byte<slot; + int i0; + paddr_t pa = thread->mem_access[slot].paddr+OFFSET; + paddr_t pa_high = thread->mem_access[slot].paddr+LEN; + int log_bank = 0; + int log_byte = 0; + for(i0 = 0; i0 < ELEMENT_SIZE; i0++) + { + log_byte = ((OFFSET>=0)&&((pa+i0)<=pa_high)) && QVAL; + log_bank |= (log_byte<system_ptr, thread->threadId, thread->mem_access[slot].paddr+OFFSET+i0); + THREAD2STRUCT->tmp_VRegs[0].ub[ELEMENT_SIZE*IDX+i0] = B; + LOG_VTCM_BYTE(pa+i0,log_byte,B,ELEMENT_SIZE*IDX+i0); + } + LOG_VTCM_BANK(pa, log_bank,BANK_IDX); +}, +() +) + + + +DEF_MACRO(fVLOG_VTCM_GATHER_WORD, + { + GATHER_FUNCTION(EA,OFFSET,IDX, LEN, 4, IDX, 1); + }, + () +) +DEF_MACRO(fVLOG_VTCM_GATHER_HALFWORD, + { + GATHER_FUNCTION(EA,OFFSET,IDX, LEN, 2, IDX, 1); + }, + () +) +DEF_MACRO(fVLOG_VTCM_GATHER_HALFWORD_DV, + { + GATHER_FUNCTION(EA,OFFSET,IDX, LEN, 2, (2*IDX2+IDX_H), 1); + }, + () +) +DEF_MACRO(fVLOG_VTCM_GATHER_WORDQ, + { + GATHER_FUNCTION(EA,OFFSET,IDX, LEN, 4, IDX, fGETQBIT(QsV,4*IDX+i0)); + }, + () +) +DEF_MACRO(fVLOG_VTCM_GATHER_HALFWORDQ, + { + GATHER_FUNCTION(EA,OFFSET,IDX, LEN, 2, IDX, fGETQBIT(QsV,2*IDX+i0)); + }, + () +) + +DEF_MACRO(fVLOG_VTCM_GATHER_HALFWORDQ_DV, + { + GATHER_FUNCTION(EA,OFFSET,IDX, LEN, 2, (2*IDX2+IDX_H), fGETQBIT(QsV,2*IDX+i0)); + }, + () +) + + +DEF_MACRO(DEBUG_LOG_ADDR, + { + + if (thread->processor_ptr->arch_proc_options->mmvec_network_addr_log2) + { + + int slot = insn->slot; + paddr_t pa = thread->mem_access[slot].paddr+OFFSET; + } + }, + () +) + + + + + + + +DEF_MACRO(SCATTER_OP_WRITE_TO_MEM, + { + for (int i = 0; i < mmvecx->vtcm_log.size; i+=sizeof(TYPE)) + { + if ( mmvecx->vtcm_log.mask.ub[i] != 0) { + TYPE dst = 0; + TYPE inc = 0; + for(int j = 0; j < sizeof(TYPE); j++) { + dst |= (sim_mem_read1(thread->system_ptr, thread->threadId, mmvecx->vtcm_log.pa[i+j]) << (8*j)); + inc |= mmvecx->vtcm_log.data.ub[j+i] << (8*j); + + mmvecx->vtcm_log.mask.ub[j+i] = 0; + mmvecx->vtcm_log.data.ub[j+i] = 0; + mmvecx->vtcm_log.offsets.ub[j+i] = 0; + } + dst += inc; + for(int j = 0; j < sizeof(TYPE); j++) { + sim_mem_write1(thread->system_ptr,thread->threadId, mmvecx->vtcm_log.pa[i+j], (dst >> (8*j))& 0xFF ); + } + } + + } + }, + () +) + +DEF_MACRO(SCATTER_FUNCTION, +{ + int slot = insn->slot; + int i0; + paddr_t pa = thread->mem_access[slot].paddr+OFFSET; + paddr_t pa_high = thread->mem_access[slot].paddr+LEN; + int log_bank = 0; + int log_byte = 0; + for(i0 = 0; i0 < ELEMENT_SIZE; i0++) { + log_byte = ((OFFSET>=0)&&((pa+i0)<=pa_high)) && QVAL; + log_bank |= (log_byte<processor_ptr)); + }, + (A_STORE,A_MEMLIKE) +) + +DEF_MACRO(fVFETCH_AL, + { + fV_AL_CHECK(EA,fVECSIZE()-1); + mem_fetch_vector(thread, insn, EA&~(fVECSIZE()-1), insn->slot, fVECSIZE()); + }, + (A_LOAD,A_MEMLIKE) +) + + +DEF_MACRO(fLOADMMV_AL, + { + fV_AL_CHECK(EA,ALIGNMENT-1); + thread->last_pkt->double_access_vec = 0; + mem_load_vector_oddva(thread, insn, EA&~(ALIGNMENT-1), EA, insn->slot, LEN, &DST.ub[0], LEN, fUSE_LOOKUP_ADDRESS_BY_REV(thread->processor_ptr)); + }, + (A_LOAD,A_MEMLIKE) +) + +DEF_MACRO(fLOADMMV, + fLOADMMV_AL(EA,fVECSIZE(),fVECSIZE(),DST), + () +) + +DEF_MACRO(fLOADMMVQ, + do { + int __i; + fLOADMMV_AL(EA,fVECSIZE(),fVECSIZE(),DST); + fVFOREACH(8,__i) if (!fGETQBIT(QVAL,__i)) DST.b[__i] = 0; + } while (0), + () +) + 
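Note (editorial, not part of the patch): fLOADMMVQ above and fLOADMMVNQ below both perform the full aligned vector load and then clear byte lanes according to the Q predicate (lanes whose bit is 0 for the Q form, lanes whose bit is 1 for the NQ form). A scalar model of that behaviour, using an invented 16-byte toy vector:

#include <stdbool.h>
#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define VB 16                            /* toy vector length in bytes */

static void toy_load_masked(uint8_t dst[VB], const uint8_t src[VB],
                            const bool qbits[VB], bool invert)
{
    memcpy(dst, src, VB);                /* the whole vector is fetched */
    for (int i = 0; i < VB; i++) {
        bool keep = invert ? !qbits[i] : qbits[i];
        if (!keep) {
            dst[i] = 0;                  /* lanes outside the mask read as 0 */
        }
    }
}

int main(void)
{
    uint8_t mem[VB], dst[VB];
    bool q[VB];
    for (int i = 0; i < VB; i++) {
        mem[i] = (uint8_t)(i + 1);
        q[i] = (i < 8);                  /* predicate covers the low half */
    }
    toy_load_masked(dst, mem, q, false); /* fLOADMMVQ-style masked load */
    for (int i = 0; i < VB; i++) {
        printf("%d ", dst[i]);
    }
    printf("\n");
    return 0;
}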
+DEF_MACRO(fLOADMMVNQ, + do { + int __i; + fLOADMMV_AL(EA,fVECSIZE(),fVECSIZE(),DST); + fVFOREACH(8,__i) if (fGETQBIT(QVAL,__i)) DST.b[__i] = 0; + } while (0), + () +) + +DEF_MACRO(fLOADMMVU_AL, + { + size4u_t size2 = (EA)&(ALIGNMENT-1); + size4u_t size1 = LEN-size2; + thread->last_pkt->double_access_vec = 1; + mem_load_vector_oddva(thread, insn, EA+size1, EA+fVECSIZE(), /* slot */ 1, size2, &DST.ub[size1], size2, fUSE_LOOKUP_ADDRESS()); + mem_load_vector_oddva(thread, insn, EA, EA,/* slot */ 0, size1, &DST.ub[0], size1, fUSE_LOOKUP_ADDRESS_BY_REV(thread->processor_ptr)); + }, + (A_LOAD,A_MEMLIKE) +) + +DEF_MACRO(fLOADMMVU, + { + /* if address happens to be aligned, only do aligned load */ + thread->last_pkt->pkt_has_vtcm_access = 0; + thread->last_pkt->pkt_access_count = 0; + if ( (EA & (fVECSIZE()-1)) == 0) { + thread->last_pkt->pkt_has_vmemu_access = 0; + thread->last_pkt->double_access = 0; + + fLOADMMV_AL(EA,fVECSIZE(),fVECSIZE(),DST); + } else { + thread->last_pkt->pkt_has_vmemu_access = 1; + thread->last_pkt->double_access = 1; + + fLOADMMVU_AL(EA,fVECSIZE(),fVECSIZE(),DST); + } + }, + () +) + +DEF_MACRO(fSTOREMMV_AL, + { + fV_AL_CHECK(EA,ALIGNMENT-1); + mem_store_vector_oddva(thread, insn, EA&~(ALIGNMENT-1), EA, insn->slot, LEN, &SRC.ub[0], 0, 0, fUSE_LOOKUP_ADDRESS_BY_REV(thread->processor_ptr)); + }, + (A_STORE,A_MEMLIKE) +) + +DEF_MACRO(fSTOREMMV, + fSTOREMMV_AL(EA,fVECSIZE(),fVECSIZE(),SRC), + () +) + +DEF_MACRO(fSTOREMMVQ_AL, + do { + mmvector_t maskvec; + int i; + for (i = 0; i < fVECSIZE(); i++) maskvec.ub[i] = fGETQBIT(MASK,i); + mem_store_vector_oddva(thread, insn, EA&~(ALIGNMENT-1), EA, insn->slot, LEN, &SRC.ub[0], &maskvec.ub[0], 0, fUSE_LOOKUP_ADDRESS_BY_REV(thread->processor_ptr)); + } while (0), + (A_STORE,A_MEMLIKE) +) + +DEF_MACRO(fSTOREMMVQ, + fSTOREMMVQ_AL(EA,fVECSIZE(),fVECSIZE(),SRC,MASK), + () +) + +DEF_MACRO(fSTOREMMVNQ_AL, + { + mmvector_t maskvec; + int i; + for (i = 0; i < fVECSIZE(); i++) maskvec.ub[i] = fGETQBIT(MASK,i); + fV_AL_CHECK(EA,ALIGNMENT-1); + mem_store_vector_oddva(thread, insn, EA&~(ALIGNMENT-1), EA, insn->slot, LEN, &SRC.ub[0], &maskvec.ub[0], 1, fUSE_LOOKUP_ADDRESS_BY_REV(thread->processor_ptr)); + }, + (A_STORE,A_MEMLIKE) +) + +DEF_MACRO(fSTOREMMVNQ, + fSTOREMMVNQ_AL(EA,fVECSIZE(),fVECSIZE(),SRC,MASK), + () +) + +DEF_MACRO(fSTOREMMVU_AL, + { + size4u_t size1 = ALIGNMENT-((EA)&(ALIGNMENT-1)); + size4u_t size2; + if (size1>LEN) size1 = LEN; + size2 = LEN-size1; + mem_store_vector_oddva(thread, insn, EA+size1, EA+fVECSIZE(), /* slot */ 1, size2, &SRC.ub[size1], 0, 0, fUSE_LOOKUP_ADDRESS()); + mem_store_vector_oddva(thread, insn, EA, EA, /* slot */ 0, size1, &SRC.ub[0], 0, 0, fUSE_LOOKUP_ADDRESS_BY_REV(thread->processor_ptr)); + }, + (A_STORE,A_MEMLIKE) +) + +DEF_MACRO(fSTOREMMVU, + { + thread->last_pkt->pkt_has_vtcm_access = 0; + thread->last_pkt->pkt_access_count = 0; + if ( (EA & (fVECSIZE()-1)) == 0) { + thread->last_pkt->double_access = 0; + fSTOREMMV_AL(EA,fVECSIZE(),fVECSIZE(),SRC); + } else { + thread->last_pkt->double_access = 1; + thread->last_pkt->pkt_has_vmemu_access = 1; + fSTOREMMVU_AL(EA,fVECSIZE(),fVECSIZE(),SRC); + } + }, + () +) + +DEF_MACRO(fSTOREMMVQU_AL, + { + size4u_t size1 = ALIGNMENT-((EA)&(ALIGNMENT-1)); + size4u_t size2; + mmvector_t maskvec; + int i; + for (i = 0; i < fVECSIZE(); i++) maskvec.ub[i] = fGETQBIT(MASK,i); + if (size1>LEN) size1 = LEN; + size2 = LEN-size1; + mem_store_vector_oddva(thread, insn, EA+size1, EA+fVECSIZE(),/* slot */ 1, size2, &SRC.ub[size1], &maskvec.ub[size1], 0, fUSE_LOOKUP_ADDRESS()); + 
mem_store_vector_oddva(thread, insn, EA, /* slot */ 0, size1, &SRC.ub[0], &maskvec.ub[0], 0, fUSE_LOOKUP_ADDRESS_BY_REV(thread->processor_ptr)); + }, + (A_STORE,A_MEMLIKE) +) + +DEF_MACRO(fSTOREMMVQU, + { + thread->last_pkt->pkt_has_vtcm_access = 0; + thread->last_pkt->pkt_access_count = 0; + if ( (EA & (fVECSIZE()-1)) == 0) { + thread->last_pkt->double_access = 0; + fSTOREMMVQ_AL(EA,fVECSIZE(),fVECSIZE(),SRC,MASK); + } else { + thread->last_pkt->double_access = 1; + thread->last_pkt->pkt_has_vmemu_access = 1; + fSTOREMMVQU_AL(EA,fVECSIZE(),fVECSIZE(),SRC,MASK); + } + }, + () +) + +DEF_MACRO(fSTOREMMVNQU_AL, + { + size4u_t size1 = ALIGNMENT-((EA)&(ALIGNMENT-1)); + size4u_t size2; + mmvector_t maskvec; + int i; + for (i = 0; i < fVECSIZE(); i++) maskvec.ub[i] = fGETQBIT(MASK,i); + if (size1>LEN) size1 = LEN; + size2 = LEN-size1; + mem_store_vector_oddva(thread, insn, EA+size1, EA+fVECSIZE(), /* slot */ 1, size2, &SRC.ub[size1], &maskvec.ub[size1], 1, fUSE_LOOKUP_ADDRESS()); + mem_store_vector_oddva(thread, insn, EA, EA, /* slot */ 0, size1, &SRC.ub[0], &maskvec.ub[0], 1, fUSE_LOOKUP_ADDRESS_BY_REV(thread->processor_ptr)); + }, + (A_STORE,A_MEMLIKE) +) + +DEF_MACRO(fSTOREMMVNQU, + { + thread->last_pkt->pkt_has_vtcm_access = 0; + thread->last_pkt->pkt_access_count = 0; + if ( (EA & (fVECSIZE()-1)) == 0) { + thread->last_pkt->double_access = 0; + fSTOREMMVNQ_AL(EA,fVECSIZE(),fVECSIZE(),SRC,MASK); + } else { + thread->last_pkt->double_access = 1; + thread->last_pkt->pkt_has_vmemu_access = 1; + fSTOREMMVNQU_AL(EA,fVECSIZE(),fVECSIZE(),SRC,MASK); + } + }, + () +) + + + + +DEF_MACRO(fVFOREACH, + for (VAR = 0; VAR < fVELEM(WIDTH); VAR++), + /* NOTHING */ +) + +DEF_MACRO(fVARRAY_ELEMENT_ACCESS, + ARRAY.v[(INDEX) / (fVECSIZE()/(sizeof(ARRAY.TYPE[0])))].TYPE[(INDEX) % (fVECSIZE()/(sizeof(ARRAY.TYPE[0])))], + () +) + +DEF_MACRO(fVNEWCANCEL, + do { THREAD2STRUCT->VRegs_select &= ~(1<<(REGNUM)); } while (0), + () +) + +DEF_MACRO(fTMPVDATA, + mmvec_vtmp_data(thread), + (A_CVI) +) + +DEF_MACRO(fVSATDW, + fVSATW( ( ( ((long long)U)<<32 ) | fZXTN(32,64,V) ) ), + /* attribs */ +) + +DEF_MACRO(fVASL_SATHI, + fVSATW(((U)<<1) | ((V)>>31)), + /* attribs */ +) + +DEF_MACRO(fVUADDSAT, + fVSATUN( WIDTH, fZXTN(WIDTH, 2*WIDTH, U) + fZXTN(WIDTH, 2*WIDTH, V)), + /* attribs */ +) + +DEF_MACRO(fVSADDSAT, + fVSATN( WIDTH, fSXTN(WIDTH, 2*WIDTH, U) + fSXTN(WIDTH, 2*WIDTH, V)), + /* attribs */ +) + +DEF_MACRO(fVUSUBSAT, + fVSATUN( WIDTH, fZXTN(WIDTH, 2*WIDTH, U) - fZXTN(WIDTH, 2*WIDTH, V)), + /* attribs */ +) + +DEF_MACRO(fVSSUBSAT, + fVSATN( WIDTH, fSXTN(WIDTH, 2*WIDTH, U) - fSXTN(WIDTH, 2*WIDTH, V)), + /* attribs */ +) + +DEF_MACRO(fVAVGU, + ((fZXTN(WIDTH, 2*WIDTH, U) + fZXTN(WIDTH, 2*WIDTH, V))>>1), + /* attribs */ +) + +DEF_MACRO(fVAVGURND, + ((fZXTN(WIDTH, 2*WIDTH, U) + fZXTN(WIDTH, 2*WIDTH, V)+1)>>1), + /* attribs */ +) + +DEF_MACRO(fVNAVGU, + ((fZXTN(WIDTH, 2*WIDTH, U) - fZXTN(WIDTH, 2*WIDTH, V))>>1), + /* attribs */ +) + +DEF_MACRO(fVNAVGURNDSAT, + fVSATUN(WIDTH,((fZXTN(WIDTH, 2*WIDTH, U) - fZXTN(WIDTH, 2*WIDTH, V)+1)>>1)), + /* attribs */ +) + +DEF_MACRO(fVAVGS, + ((fSXTN(WIDTH, 2*WIDTH, U) + fSXTN(WIDTH, 2*WIDTH, V))>>1), + /* attribs */ +) + +DEF_MACRO(fVAVGSRND, + ((fSXTN(WIDTH, 2*WIDTH, U) + fSXTN(WIDTH, 2*WIDTH, V)+1)>>1), + /* attribs */ +) + +DEF_MACRO(fVNAVGS, + ((fSXTN(WIDTH, 2*WIDTH, U) - fSXTN(WIDTH, 2*WIDTH, V))>>1), + /* attribs */ +) + +DEF_MACRO(fVNAVGSRND, + ((fSXTN(WIDTH, 2*WIDTH, U) - fSXTN(WIDTH, 2*WIDTH, V)+1)>>1), + /* attribs */ +) + +DEF_MACRO(fVNAVGSRNDSAT, + fVSATN(WIDTH,((fSXTN(WIDTH, 
2*WIDTH, U) - fSXTN(WIDTH, 2*WIDTH, V)+1)>>1)), + /* attribs */ +) + + +DEF_MACRO(fVNOROUND, + VAL, + /* NOTHING */ +) +DEF_MACRO(fVNOSAT, + VAL, + /* NOTHING */ +) + +DEF_MACRO(fVROUND, + ((VAL) + (((SHAMT)>0)?(1LL<<((SHAMT)-1)):0)), + /* NOTHING */ +) + +DEF_MACRO(fCARRY_FROM_ADD32, + (((fZXTN(32,64,A)+fZXTN(32,64,B)+C) >> 32) & 1), + /* NOTHING */ +) + +DEF_MACRO(fUARCH_NOTE_PUMP_4X, + , + () +) + +DEF_MACRO(fUARCH_NOTE_PUMP_2X, + , + () +) diff --git a/target/hexagon/insn.h b/target/hexagon/insn.h index 2e345912a8..aa26389147 100644 --- a/target/hexagon/insn.h +++ b/target/hexagon/insn.h @@ -67,6 +67,9 @@ struct Packet { bool pkt_has_store_s0; bool pkt_has_store_s1; + bool pkt_has_hvx; + Insn *vhist_insn; + Insn insn[INSTRUCTIONS_MAX]; }; diff --git a/target/hexagon/internal.h b/target/hexagon/internal.h index 6b20affdfa..82ac3042ab 100644 --- a/target/hexagon/internal.h +++ b/target/hexagon/internal.h @@ -31,6 +31,9 @@ int hexagon_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg); int hexagon_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg); + +void hexagon_debug_vreg(CPUHexagonState *env, int regnum); +void hexagon_debug_qreg(CPUHexagonState *env, int regnum); void hexagon_debug(CPUHexagonState *env); extern const char * const hexagon_regnames[TOTAL_PER_THREAD_REGS]; diff --git a/target/hexagon/macros.h b/target/hexagon/macros.h index 094b8dabb5..19d103cad5 100644 --- a/target/hexagon/macros.h +++ b/target/hexagon/macros.h @@ -62,7 +62,7 @@ reg_field_info[FIELD].offset) #define SET_USR_FIELD(FIELD, VAL) \ - fINSERT_BITS(env->gpr[HEX_REG_USR], reg_field_info[FIELD].width, \ + fINSERT_BITS(env->new_value[HEX_REG_USR], reg_field_info[FIELD].width, \ reg_field_info[FIELD].offset, (VAL)) #endif @@ -187,18 +187,15 @@ #ifdef QEMU_GENERATE static inline void gen_pred_cancel(TCGv pred, int slot_num) { - TCGv slot_mask = tcg_const_tl(1 << slot_num); + TCGv slot_mask = tcg_temp_new(); TCGv tmp = tcg_temp_new(); - TCGv zero = tcg_const_tl(0); - TCGv one = tcg_const_tl(1); - tcg_gen_or_tl(slot_mask, hex_slot_cancelled, slot_mask); + TCGv zero = tcg_constant_tl(0); + tcg_gen_ori_tl(slot_mask, hex_slot_cancelled, 1 << slot_num); tcg_gen_andi_tl(tmp, pred, 1); tcg_gen_movcond_tl(TCG_COND_EQ, hex_slot_cancelled, tmp, zero, slot_mask, hex_slot_cancelled); tcg_temp_free(slot_mask); tcg_temp_free(tmp); - tcg_temp_free(zero); - tcg_temp_free(one); } #define PRED_LOAD_CANCEL(PRED, EA) \ gen_pred_cancel(PRED, insn->is_endloop ? 4 : insn->slot) @@ -269,6 +266,10 @@ static inline void gen_pred_cancel(TCGv pred, int slot_num) #define fNEWREG_ST(VAL) (VAL) +#define fVSATUVALN(N, VAL) \ + ({ \ + (((int)(VAL)) < 0) ? 0 : ((1LL << (N)) - 1); \ + }) #define fSATUVALN(N, VAL) \ ({ \ fSET_OVERFLOW(); \ @@ -279,10 +280,16 @@ static inline void gen_pred_cancel(TCGv pred, int slot_num) fSET_OVERFLOW(); \ ((VAL) < 0) ? (-(1LL << ((N) - 1))) : ((1LL << ((N) - 1)) - 1); \ }) +#define fVSATVALN(N, VAL) \ + ({ \ + ((VAL) < 0) ? (-(1LL << ((N) - 1))) : ((1LL << ((N) - 1)) - 1); \ + }) #define fZXTN(N, M, VAL) (((N) != 0) ? extract64((VAL), 0, (N)) : 0LL) #define fSXTN(N, M, VAL) (((N) != 0) ? sextract64((VAL), 0, (N)) : 0LL) #define fSATN(N, VAL) \ ((fSXTN(N, 64, VAL) == (VAL)) ? (VAL) : fSATVALN(N, VAL)) +#define fVSATN(N, VAL) \ + ((fSXTN(N, 64, VAL) == (VAL)) ? 
(VAL) : fVSATVALN(N, VAL)) #define fADDSAT64(DST, A, B) \ do { \ uint64_t __a = fCAST8u(A); \ @@ -305,12 +312,18 @@ static inline void gen_pred_cancel(TCGv pred, int slot_num) DST = __sum; \ } \ } while (0) +#define fVSATUN(N, VAL) \ + ((fZXTN(N, 64, VAL) == (VAL)) ? (VAL) : fVSATUVALN(N, VAL)) #define fSATUN(N, VAL) \ ((fZXTN(N, 64, VAL) == (VAL)) ? (VAL) : fSATUVALN(N, VAL)) #define fSATH(VAL) (fSATN(16, VAL)) #define fSATUH(VAL) (fSATUN(16, VAL)) +#define fVSATH(VAL) (fVSATN(16, VAL)) +#define fVSATUH(VAL) (fVSATUN(16, VAL)) #define fSATUB(VAL) (fSATUN(8, VAL)) #define fSATB(VAL) (fSATN(8, VAL)) +#define fVSATUB(VAL) (fVSATUN(8, VAL)) +#define fVSATB(VAL) (fVSATN(8, VAL)) #define fIMMEXT(IMM) (IMM = IMM) #define fMUST_IMMEXT(IMM) fIMMEXT(IMM) @@ -417,6 +430,8 @@ static inline TCGv gen_read_ireg(TCGv result, TCGv val, int shift) #define fCAST4s(A) ((int32_t)(A)) #define fCAST8u(A) ((uint64_t)(A)) #define fCAST8s(A) ((int64_t)(A)) +#define fCAST2_2s(A) ((int16_t)(A)) +#define fCAST2_2u(A) ((uint16_t)(A)) #define fCAST4_4s(A) ((int32_t)(A)) #define fCAST4_4u(A) ((uint32_t)(A)) #define fCAST4_8s(A) ((int64_t)((int32_t)(A))) @@ -501,10 +516,9 @@ static inline TCGv gen_read_ireg(TCGv result, TCGv val, int shift) #define fPM_M(REG, MVAL) tcg_gen_add_tl(REG, REG, MVAL) #define fPM_CIRI(REG, IMM, MVAL) \ do { \ - TCGv tcgv_siV = tcg_const_tl(siV); \ + TCGv tcgv_siV = tcg_constant_tl(siV); \ gen_helper_fcircadd(REG, REG, tcgv_siV, MuV, \ hex_gpr[HEX_REG_CS0 + MuN]); \ - tcg_temp_free(tcgv_siV); \ } while (0) #else #define fEA_IMM(IMM) do { EA = (IMM); } while (0) @@ -514,7 +528,9 @@ static inline TCGv gen_read_ireg(TCGv result, TCGv val, int shift) #define fPM_M(REG, MVAL) do { REG = REG + (MVAL); } while (0) #endif #define fSCALE(N, A) (((int64_t)(A)) << N) +#define fVSATW(A) fVSATN(32, ((long long)A)) #define fSATW(A) fSATN(32, ((long long)A)) +#define fVSAT(A) fVSATN(32, (A)) #define fSAT(A) fSATN(32, (A)) #define fSAT_ORIG_SHL(A, ORIG_REG) \ ((((int32_t)((fSAT(A)) ^ ((int32_t)(ORIG_REG)))) < 0) \ @@ -651,12 +667,14 @@ static inline TCGv gen_read_ireg(TCGv result, TCGv val, int shift) fSETBIT(j, DST, VAL); \ } \ } while (0) +#define fCOUNTONES_2(VAL) ctpop16(VAL) #define fCOUNTONES_4(VAL) ctpop32(VAL) #define fCOUNTONES_8(VAL) ctpop64(VAL) #define fBREV_8(VAL) revbit64(VAL) #define fBREV_4(VAL) revbit32(VAL) #define fCL1_8(VAL) clo64(VAL) #define fCL1_4(VAL) clo32(VAL) +#define fCL1_2(VAL) (clz32(~(uint16_t)(VAL) & 0xffff) - 16) #define fINTERLEAVE(ODD, EVEN) interleave(ODD, EVEN) #define fDEINTERLEAVE(MIXED) deinterleave(MIXED) #define fHIDE(A) A diff --git a/target/hexagon/meson.build b/target/hexagon/meson.build index 6fd9360b74..b61243103f 100644 --- a/target/hexagon/meson.build +++ b/target/hexagon/meson.build @@ -20,6 +20,7 @@ hexagon_ss = ss.source_set() hex_common_py = 'hex_common.py' attribs_def = meson.current_source_dir() / 'attribs_def.h.inc' gen_tcg_h = meson.current_source_dir() / 'gen_tcg.h' +gen_tcg_hvx_h = meson.current_source_dir() / 'gen_tcg_hvx.h' # # Step 1 @@ -63,8 +64,8 @@ helper_protos_generated = custom_target( 'helper_protos_generated.h.inc', output: 'helper_protos_generated.h.inc', depends: [semantics_generated], - depend_files: [hex_common_py, attribs_def, gen_tcg_h], - command: [python, files('gen_helper_protos.py'), semantics_generated, attribs_def, gen_tcg_h, '@OUTPUT@'], + depend_files: [hex_common_py, attribs_def, gen_tcg_h, gen_tcg_hvx_h], + command: [python, files('gen_helper_protos.py'), semantics_generated, attribs_def, gen_tcg_h, gen_tcg_hvx_h, 
'@OUTPUT@'], ) hexagon_ss.add(helper_protos_generated) @@ -72,8 +73,8 @@ tcg_funcs_generated = custom_target( 'tcg_funcs_generated.c.inc', output: 'tcg_funcs_generated.c.inc', depends: [semantics_generated], - depend_files: [hex_common_py, attribs_def, gen_tcg_h], - command: [python, files('gen_tcg_funcs.py'), semantics_generated, attribs_def, gen_tcg_h, '@OUTPUT@'], + depend_files: [hex_common_py, attribs_def, gen_tcg_h, gen_tcg_hvx_h], + command: [python, files('gen_tcg_funcs.py'), semantics_generated, attribs_def, gen_tcg_h, gen_tcg_hvx_h, '@OUTPUT@'], ) hexagon_ss.add(tcg_funcs_generated) @@ -90,8 +91,8 @@ helper_funcs_generated = custom_target( 'helper_funcs_generated.c.inc', output: 'helper_funcs_generated.c.inc', depends: [semantics_generated], - depend_files: [hex_common_py, attribs_def, gen_tcg_h], - command: [python, files('gen_helper_funcs.py'), semantics_generated, attribs_def, gen_tcg_h, '@OUTPUT@'], + depend_files: [hex_common_py, attribs_def, gen_tcg_h, gen_tcg_hvx_h], + command: [python, files('gen_helper_funcs.py'), semantics_generated, attribs_def, gen_tcg_h, gen_tcg_hvx_h, '@OUTPUT@'], ) hexagon_ss.add(helper_funcs_generated) @@ -156,7 +157,8 @@ dectree_generated = custom_target( 'dectree_generated.h.inc', output: 'dectree_generated.h.inc', depends: [iset_py], - command: ['env', 'PYTHONPATH=' + meson.current_build_dir(), files('dectree.py'), '@OUTPUT@'], + env: {'PYTHONPATH': meson.current_build_dir()}, + command: [python, files('dectree.py'), '@OUTPUT@'], ) hexagon_ss.add(dectree_generated) @@ -173,6 +175,8 @@ hexagon_ss.add(files( 'printinsn.c', 'arch.c', 'fma_emu.c', + 'mmvec/decode_ext_mmvec.c', + 'mmvec/system_ext_mmvec.c', )) target_arch += {'hexagon': hexagon_ss} diff --git a/target/hexagon/mmvec/decode_ext_mmvec.c b/target/hexagon/mmvec/decode_ext_mmvec.c new file mode 100644 index 0000000000..061a65ab88 --- /dev/null +++ b/target/hexagon/mmvec/decode_ext_mmvec.c @@ -0,0 +1,236 @@ +/* + * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ + +#include "qemu/osdep.h" +#include "decode.h" +#include "opcodes.h" +#include "insn.h" +#include "iclass.h" +#include "mmvec/mmvec.h" +#include "mmvec/decode_ext_mmvec.h" + +static void +check_new_value(Packet *pkt) +{ + /* .new value for a MMVector store */ + int i, j; + const char *reginfo; + const char *destletters; + const char *dststr = NULL; + uint16_t def_opcode; + char letter; + int def_regnum; + + for (i = 1; i < pkt->num_insns; i++) { + uint16_t use_opcode = pkt->insn[i].opcode; + if (GET_ATTRIB(use_opcode, A_DOTNEWVALUE) && + GET_ATTRIB(use_opcode, A_CVI) && + GET_ATTRIB(use_opcode, A_STORE)) { + int use_regidx = strchr(opcode_reginfo[use_opcode], 's') - + opcode_reginfo[use_opcode]; + /* + * What's encoded at the N-field is the offset to who's producing + * the value. + * Shift off the LSB which indicates odd/even register. 
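As a rough worked example of the N-field decoding described in the comment above (the value 0x3 and the variable names are invented for illustration, not part of the patch):

    int nfield = 0x3;          /* hypothetical encoded operand                  */
    int odd    = nfield & 1;   /* 1: take the odd register of the producer pair */
    int back   = nfield >> 1;  /* 1: producer is the closest preceding HVX insn */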
+ */ + int def_off = ((pkt->insn[i].regno[use_regidx]) >> 1); + int def_oreg = pkt->insn[i].regno[use_regidx] & 1; + int def_idx = -1; + for (j = i - 1; (j >= 0) && (def_off >= 0); j--) { + if (!GET_ATTRIB(pkt->insn[j].opcode, A_CVI)) { + continue; + } + def_off--; + if (def_off == 0) { + def_idx = j; + break; + } + } + /* + * Check for a badly encoded N-field which points to an instruction + * out-of-range + */ + g_assert(!((def_off != 0) || (def_idx < 0) || + (def_idx > (pkt->num_insns - 1)))); + + /* def_idx is the index of the producer */ + def_opcode = pkt->insn[def_idx].opcode; + reginfo = opcode_reginfo[def_opcode]; + destletters = "dexy"; + for (j = 0; (letter = destletters[j]) != 0; j++) { + dststr = strchr(reginfo, letter); + if (dststr != NULL) { + break; + } + } + if ((dststr == NULL) && GET_ATTRIB(def_opcode, A_CVI_GATHER)) { + def_regnum = 0; + pkt->insn[i].regno[use_regidx] = def_oreg; + pkt->insn[i].new_value_producer_slot = pkt->insn[def_idx].slot; + } else { + if (dststr == NULL) { + /* still not there, we have a bad packet */ + g_assert_not_reached(); + } + def_regnum = pkt->insn[def_idx].regno[dststr - reginfo]; + /* Now patch up the consumer with the register number */ + pkt->insn[i].regno[use_regidx] = def_regnum ^ def_oreg; + /* special case for (Vx,Vy) */ + dststr = strchr(reginfo, 'y'); + if (def_oreg && strchr(reginfo, 'x') && dststr) { + def_regnum = pkt->insn[def_idx].regno[dststr - reginfo]; + pkt->insn[i].regno[use_regidx] = def_regnum; + } + /* + * We need to remember who produces this value to later + * check if it was dynamically cancelled + */ + pkt->insn[i].new_value_producer_slot = pkt->insn[def_idx].slot; + } + } + } +} + +/* + * We don't want to reorder slot1/slot0 with respect to each other. + * So in our shuffling, we don't want to move the .cur / .tmp vmem earlier + * Instead, we should move the producing instruction later + * But the producing instruction might feed a .new store! + * So we may need to move that even later. 
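A hypothetical packet sketch of the reordering described above (instruction names are abstract placeholders, not real Hexagon syntax):

    /* encoded order:   A (HVX op reading the .tmp load result),
     *                  S (.new store of A's result),
     *                  L (the .tmp vmem load)
     * execution order: L, A, S -- A is moved after the load it consumes,
     *                  and S, which needs A's .new value, moves to the end. */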
+ */ + +static void +decode_mmvec_move_cvi_to_end(Packet *pkt, int max) +{ + int i; + for (i = 0; i < max; i++) { + if (GET_ATTRIB(pkt->insn[i].opcode, A_CVI)) { + int last_inst = pkt->num_insns - 1; + uint16_t last_opcode = pkt->insn[last_inst].opcode; + + /* + * If the last instruction is an endloop, move to the one before it + * Keep endloop as the last thing always + */ + if ((last_opcode == J2_endloop0) || + (last_opcode == J2_endloop1) || + (last_opcode == J2_endloop01)) { + last_inst--; + } + + decode_send_insn_to(pkt, i, last_inst); + max--; + i--; /* Retry this index now that packet has rotated */ + } + } +} + +static void +decode_shuffle_for_execution_vops(Packet *pkt) +{ + /* + * Sort for .new + */ + int i; + for (i = 0; i < pkt->num_insns; i++) { + uint16_t opcode = pkt->insn[i].opcode; + if (GET_ATTRIB(opcode, A_LOAD) && + (GET_ATTRIB(opcode, A_CVI_NEW) || + GET_ATTRIB(opcode, A_CVI_TMP))) { + /* + * Find prior consuming vector instructions + * Move to end of packet + */ + decode_mmvec_move_cvi_to_end(pkt, i); + break; + } + } + + /* Move HVX new value stores to the end of the packet */ + for (i = 0; i < pkt->num_insns - 1; i++) { + uint16_t opcode = pkt->insn[i].opcode; + if (GET_ATTRIB(opcode, A_STORE) && + GET_ATTRIB(opcode, A_CVI_NEW) && + !GET_ATTRIB(opcode, A_CVI_SCATTER_RELEASE)) { + int last_inst = pkt->num_insns - 1; + uint16_t last_opcode = pkt->insn[last_inst].opcode; + + /* + * If the last instruction is an endloop, move to the one before it + * Keep endloop as the last thing always + */ + if ((last_opcode == J2_endloop0) || + (last_opcode == J2_endloop1) || + (last_opcode == J2_endloop01)) { + last_inst--; + } + + decode_send_insn_to(pkt, i, last_inst); + break; + } + } +} + +static void +check_for_vhist(Packet *pkt) +{ + pkt->vhist_insn = NULL; + for (int i = 0; i < pkt->num_insns; i++) { + Insn *insn = &pkt->insn[i]; + int opcode = insn->opcode; + if (GET_ATTRIB(opcode, A_CVI) && GET_ATTRIB(opcode, A_CVI_4SLOT)) { + pkt->vhist_insn = insn; + return; + } + } +} + +/* + * Public Functions + */ + +SlotMask mmvec_ext_decode_find_iclass_slots(int opcode) +{ + if (GET_ATTRIB(opcode, A_CVI_VM)) { + /* HVX memory instruction */ + if (GET_ATTRIB(opcode, A_RESTRICT_SLOT0ONLY)) { + return SLOTS_0; + } else if (GET_ATTRIB(opcode, A_RESTRICT_SLOT1ONLY)) { + return SLOTS_1; + } + return SLOTS_01; + } else if (GET_ATTRIB(opcode, A_RESTRICT_SLOT2ONLY)) { + return SLOTS_2; + } else if (GET_ATTRIB(opcode, A_CVI_VX)) { + /* HVX multiply instruction */ + return SLOTS_23; + } else if (GET_ATTRIB(opcode, A_CVI_VS_VX)) { + /* HVX permute/shift instruction */ + return SLOTS_23; + } else { + return SLOTS_0123; + } +} + +void mmvec_ext_decode_checks(Packet *pkt, bool disas_only) +{ + check_new_value(pkt); + if (!disas_only) { + decode_shuffle_for_execution_vops(pkt); + } + check_for_vhist(pkt); +} diff --git a/target/hexagon/mmvec/decode_ext_mmvec.h b/target/hexagon/mmvec/decode_ext_mmvec.h new file mode 100644 index 0000000000..3664b68cf9 --- /dev/null +++ b/target/hexagon/mmvec/decode_ext_mmvec.h @@ -0,0 +1,24 @@ +/* + * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
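A minimal sketch of how a decoder caller might consult the slot mask returned by mmvec_ext_decode_find_iclass_slots(); the call site below is assumed, not shown in this patch:

    SlotMask slots = mmvec_ext_decode_find_iclass_slots(insn->opcode);
    if (slots == SLOTS_23) {
        /* HVX multiply or permute/shift: may only issue in slot 2 or 3 */
    }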
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ + +#ifndef HEXAGON_DECODE_EXT_MMVEC_H +#define HEXAGON_DECODE_EXT_MMVEC_H + +void mmvec_ext_decode_checks(Packet *pkt, bool disas_only); +SlotMask mmvec_ext_decode_find_iclass_slots(int opcode); + +#endif diff --git a/target/hexagon/mmvec/macros.h b/target/hexagon/mmvec/macros.h new file mode 100644 index 0000000000..10f4630364 --- /dev/null +++ b/target/hexagon/mmvec/macros.h @@ -0,0 +1,354 @@ +/* + * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ + +#ifndef HEXAGON_MMVEC_MACROS_H +#define HEXAGON_MMVEC_MACROS_H + +#include "qemu/osdep.h" +#include "qemu/host-utils.h" +#include "arch.h" +#include "mmvec/system_ext_mmvec.h" + +#ifndef QEMU_GENERATE +#define VdV (*(MMVector *)(VdV_void)) +#define VsV (*(MMVector *)(VsV_void)) +#define VuV (*(MMVector *)(VuV_void)) +#define VvV (*(MMVector *)(VvV_void)) +#define VwV (*(MMVector *)(VwV_void)) +#define VxV (*(MMVector *)(VxV_void)) +#define VyV (*(MMVector *)(VyV_void)) + +#define VddV (*(MMVectorPair *)(VddV_void)) +#define VuuV (*(MMVectorPair *)(VuuV_void)) +#define VvvV (*(MMVectorPair *)(VvvV_void)) +#define VxxV (*(MMVectorPair *)(VxxV_void)) + +#define QeV (*(MMQReg *)(QeV_void)) +#define QdV (*(MMQReg *)(QdV_void)) +#define QsV (*(MMQReg *)(QsV_void)) +#define QtV (*(MMQReg *)(QtV_void)) +#define QuV (*(MMQReg *)(QuV_void)) +#define QvV (*(MMQReg *)(QvV_void)) +#define QxV (*(MMQReg *)(QxV_void)) +#endif + +#define LOG_VTCM_BYTE(VA, MASK, VAL, IDX) \ + do { \ + env->vtcm_log.data.ub[IDX] = (VAL); \ + if (MASK) { \ + set_bit((IDX), env->vtcm_log.mask); \ + } else { \ + clear_bit((IDX), env->vtcm_log.mask); \ + } \ + env->vtcm_log.va[IDX] = (VA); \ + } while (0) + +#define fNOTQ(VAL) \ + ({ \ + MMQReg _ret; \ + int _i_; \ + for (_i_ = 0; _i_ < fVECSIZE() / 64; _i_++) { \ + _ret.ud[_i_] = ~VAL.ud[_i_]; \ + } \ + _ret;\ + }) +#define fGETQBITS(REG, WIDTH, MASK, BITNO) \ + ((MASK) & (REG.w[(BITNO) >> 5] >> ((BITNO) & 0x1f))) +#define fGETQBIT(REG, BITNO) fGETQBITS(REG, 1, 1, BITNO) +#define fGENMASKW(QREG, IDX) \ + (((fGETQBIT(QREG, (IDX * 4 + 0)) ? 0xFF : 0x0) << 0) | \ + ((fGETQBIT(QREG, (IDX * 4 + 1)) ? 0xFF : 0x0) << 8) | \ + ((fGETQBIT(QREG, (IDX * 4 + 2)) ? 0xFF : 0x0) << 16) | \ + ((fGETQBIT(QREG, (IDX * 4 + 3)) ? 0xFF : 0x0) << 24)) +#define fGETNIBBLE(IDX, SRC) (fSXTN(4, 8, (SRC >> (4 * IDX)) & 0xF)) +#define fGETCRUMB(IDX, SRC) (fSXTN(2, 8, (SRC >> (2 * IDX)) & 0x3)) +#define fGETCRUMB_SYMMETRIC(IDX, SRC) \ + ((fGETCRUMB(IDX, SRC) >= 0 ? 
(2 - fGETCRUMB(IDX, SRC)) \ + : fGETCRUMB(IDX, SRC))) +#define fGENMASKH(QREG, IDX) \ + (((fGETQBIT(QREG, (IDX * 2 + 0)) ? 0xFF : 0x0) << 0) | \ + ((fGETQBIT(QREG, (IDX * 2 + 1)) ? 0xFF : 0x0) << 8)) +#define fGETMASKW(VREG, QREG, IDX) (VREG.w[IDX] & fGENMASKW((QREG), IDX)) +#define fGETMASKH(VREG, QREG, IDX) (VREG.h[IDX] & fGENMASKH((QREG), IDX)) +#define fCONDMASK8(QREG, IDX, YESVAL, NOVAL) \ + (fGETQBIT(QREG, IDX) ? (YESVAL) : (NOVAL)) +#define fCONDMASK16(QREG, IDX, YESVAL, NOVAL) \ + ((fGENMASKH(QREG, IDX) & (YESVAL)) | \ + (fGENMASKH(fNOTQ(QREG), IDX) & (NOVAL))) +#define fCONDMASK32(QREG, IDX, YESVAL, NOVAL) \ + ((fGENMASKW(QREG, IDX) & (YESVAL)) | \ + (fGENMASKW(fNOTQ(QREG), IDX) & (NOVAL))) +#define fSETQBITS(REG, WIDTH, MASK, BITNO, VAL) \ + do { \ + uint32_t __TMP = (VAL); \ + REG.w[(BITNO) >> 5] &= ~((MASK) << ((BITNO) & 0x1f)); \ + REG.w[(BITNO) >> 5] |= (((__TMP) & (MASK)) << ((BITNO) & 0x1f)); \ + } while (0) +#define fSETQBIT(REG, BITNO, VAL) fSETQBITS(REG, 1, 1, BITNO, VAL) +#define fVBYTES() (fVECSIZE()) +#define fVALIGN(ADDR, LOG2_ALIGNMENT) (ADDR = ADDR & ~(LOG2_ALIGNMENT - 1)) +#define fVLASTBYTE(ADDR, LOG2_ALIGNMENT) (ADDR = ADDR | (LOG2_ALIGNMENT - 1)) +#define fVELEM(WIDTH) ((fVECSIZE() * 8) / WIDTH) +#define fVECLOGSIZE() (7) +#define fVECSIZE() (1 << fVECLOGSIZE()) +#define fSWAPB(A, B) do { uint8_t tmp = A; A = B; B = tmp; } while (0) +#define fV_AL_CHECK(EA, MASK) \ + if ((EA) & (MASK)) { \ + warn("aligning misaligned vector. EA=%08x", (EA)); \ + } +#define fSCATTER_INIT(REGION_START, LENGTH, ELEMENT_SIZE) \ + mem_vector_scatter_init(env) +#define fGATHER_INIT(REGION_START, LENGTH, ELEMENT_SIZE) \ + mem_vector_gather_init(env) +#define fSCATTER_FINISH(OP) +#define fGATHER_FINISH() +#define fLOG_SCATTER_OP(SIZE) \ + do { \ + env->vtcm_log.op = true; \ + env->vtcm_log.op_size = SIZE; \ + } while (0) +#define fVLOG_VTCM_WORD_INCREMENT(EA, OFFSET, INC, IDX, ALIGNMENT, LEN) \ + do { \ + int log_byte = 0; \ + target_ulong va = EA; \ + target_ulong va_high = EA + LEN; \ + for (int i0 = 0; i0 < 4; i0++) { \ + log_byte = (va + i0) <= va_high; \ + LOG_VTCM_BYTE(va + i0, log_byte, INC. 
ub[4 * IDX + i0], \ + 4 * IDX + i0); \ + } \ + } while (0) +#define fVLOG_VTCM_HALFWORD_INCREMENT(EA, OFFSET, INC, IDX, ALIGNMENT, LEN) \ + do { \ + int log_byte = 0; \ + target_ulong va = EA; \ + target_ulong va_high = EA + LEN; \ + for (int i0 = 0; i0 < 2; i0++) { \ + log_byte = (va + i0) <= va_high; \ + LOG_VTCM_BYTE(va + i0, log_byte, INC.ub[2 * IDX + i0], \ + 2 * IDX + i0); \ + } \ + } while (0) + +#define fVLOG_VTCM_HALFWORD_INCREMENT_DV(EA, OFFSET, INC, IDX, IDX2, IDX_H, \ + ALIGNMENT, LEN) \ + do { \ + int log_byte = 0; \ + target_ulong va = EA; \ + target_ulong va_high = EA + LEN; \ + for (int i0 = 0; i0 < 2; i0++) { \ + log_byte = (va + i0) <= va_high; \ + LOG_VTCM_BYTE(va + i0, log_byte, INC.ub[2 * IDX + i0], \ + 2 * IDX + i0); \ + } \ + } while (0) + +/* NOTE - Will this always be tmp_VRegs[0]; */ +#define GATHER_FUNCTION(EA, OFFSET, IDX, LEN, ELEMENT_SIZE, BANK_IDX, QVAL) \ + do { \ + int i0; \ + target_ulong va = EA; \ + target_ulong va_high = EA + LEN; \ + uintptr_t ra = GETPC(); \ + int log_bank = 0; \ + int log_byte = 0; \ + for (i0 = 0; i0 < ELEMENT_SIZE; i0++) { \ + log_byte = ((va + i0) <= va_high) && QVAL; \ + log_bank |= (log_byte << i0); \ + uint8_t B; \ + B = cpu_ldub_data_ra(env, EA + i0, ra); \ + env->tmp_VRegs[0].ub[ELEMENT_SIZE * IDX + i0] = B; \ + LOG_VTCM_BYTE(va + i0, log_byte, B, ELEMENT_SIZE * IDX + i0); \ + } \ + } while (0) +#define fVLOG_VTCM_GATHER_WORD(EA, OFFSET, IDX, LEN) \ + do { \ + GATHER_FUNCTION(EA, OFFSET, IDX, LEN, 4, IDX, 1); \ + } while (0) +#define fVLOG_VTCM_GATHER_HALFWORD(EA, OFFSET, IDX, LEN) \ + do { \ + GATHER_FUNCTION(EA, OFFSET, IDX, LEN, 2, IDX, 1); \ + } while (0) +#define fVLOG_VTCM_GATHER_HALFWORD_DV(EA, OFFSET, IDX, IDX2, IDX_H, LEN) \ + do { \ + GATHER_FUNCTION(EA, OFFSET, IDX, LEN, 2, (2 * IDX2 + IDX_H), 1); \ + } while (0) +#define fVLOG_VTCM_GATHER_WORDQ(EA, OFFSET, IDX, Q, LEN) \ + do { \ + GATHER_FUNCTION(EA, OFFSET, IDX, LEN, 4, IDX, \ + fGETQBIT(QsV, 4 * IDX + i0)); \ + } while (0) +#define fVLOG_VTCM_GATHER_HALFWORDQ(EA, OFFSET, IDX, Q, LEN) \ + do { \ + GATHER_FUNCTION(EA, OFFSET, IDX, LEN, 2, IDX, \ + fGETQBIT(QsV, 2 * IDX + i0)); \ + } while (0) +#define fVLOG_VTCM_GATHER_HALFWORDQ_DV(EA, OFFSET, IDX, IDX2, IDX_H, Q, LEN) \ + do { \ + GATHER_FUNCTION(EA, OFFSET, IDX, LEN, 2, (2 * IDX2 + IDX_H), \ + fGETQBIT(QsV, 2 * IDX + i0)); \ + } while (0) +#define SCATTER_OP_WRITE_TO_MEM(TYPE) \ + do { \ + uintptr_t ra = GETPC(); \ + for (int i = 0; i < sizeof(MMVector); i += sizeof(TYPE)) { \ + if (test_bit(i, env->vtcm_log.mask)) { \ + TYPE dst = 0; \ + TYPE inc = 0; \ + for (int j = 0; j < sizeof(TYPE); j++) { \ + uint8_t val; \ + val = cpu_ldub_data_ra(env, env->vtcm_log.va[i + j], ra); \ + dst |= val << (8 * j); \ + inc |= env->vtcm_log.data.ub[j + i] << (8 * j); \ + clear_bit(j + i, env->vtcm_log.mask); \ + env->vtcm_log.data.ub[j + i] = 0; \ + } \ + dst += inc; \ + for (int j = 0; j < sizeof(TYPE); j++) { \ + cpu_stb_data_ra(env, env->vtcm_log.va[i + j], \ + (dst >> (8 * j)) & 0xFF, ra); \ + } \ + } \ + } \ + } while (0) +#define SCATTER_OP_PROBE_MEM(TYPE, MMU_IDX, RETADDR) \ + do { \ + for (int i = 0; i < sizeof(MMVector); i += sizeof(TYPE)) { \ + if (test_bit(i, env->vtcm_log.mask)) { \ + for (int j = 0; j < sizeof(TYPE); j++) { \ + probe_read(env, env->vtcm_log.va[i + j], 1, \ + MMU_IDX, RETADDR); \ + probe_write(env, env->vtcm_log.va[i + j], 1, \ + MMU_IDX, RETADDR); \ + } \ + } \ + } \ + } while (0) +#define SCATTER_FUNCTION(EA, OFFSET, IDX, LEN, ELEM_SIZE, BANK_IDX, QVAL, IN) \ + do { \ + int i0; \ + target_ulong 
va = EA; \ + target_ulong va_high = EA + LEN; \ + int log_bank = 0; \ + int log_byte = 0; \ + for (i0 = 0; i0 < ELEM_SIZE; i0++) { \ + log_byte = ((va + i0) <= va_high) && QVAL; \ + log_bank |= (log_byte << i0); \ + LOG_VTCM_BYTE(va + i0, log_byte, IN.ub[ELEM_SIZE * IDX + i0], \ + ELEM_SIZE * IDX + i0); \ + } \ + } while (0) +#define fVLOG_VTCM_HALFWORD(EA, OFFSET, IN, IDX, LEN) \ + do { \ + SCATTER_FUNCTION(EA, OFFSET, IDX, LEN, 2, IDX, 1, IN); \ + } while (0) +#define fVLOG_VTCM_WORD(EA, OFFSET, IN, IDX, LEN) \ + do { \ + SCATTER_FUNCTION(EA, OFFSET, IDX, LEN, 4, IDX, 1, IN); \ + } while (0) +#define fVLOG_VTCM_HALFWORDQ(EA, OFFSET, IN, IDX, Q, LEN) \ + do { \ + SCATTER_FUNCTION(EA, OFFSET, IDX, LEN, 2, IDX, \ + fGETQBIT(QsV, 2 * IDX + i0), IN); \ + } while (0) +#define fVLOG_VTCM_WORDQ(EA, OFFSET, IN, IDX, Q, LEN) \ + do { \ + SCATTER_FUNCTION(EA, OFFSET, IDX, LEN, 4, IDX, \ + fGETQBIT(QsV, 4 * IDX + i0), IN); \ + } while (0) +#define fVLOG_VTCM_HALFWORD_DV(EA, OFFSET, IN, IDX, IDX2, IDX_H, LEN) \ + do { \ + SCATTER_FUNCTION(EA, OFFSET, IDX, LEN, 2, \ + (2 * IDX2 + IDX_H), 1, IN); \ + } while (0) +#define fVLOG_VTCM_HALFWORDQ_DV(EA, OFFSET, IN, IDX, Q, IDX2, IDX_H, LEN) \ + do { \ + SCATTER_FUNCTION(EA, OFFSET, IDX, LEN, 2, (2 * IDX2 + IDX_H), \ + fGETQBIT(QsV, 2 * IDX + i0), IN); \ + } while (0) +#define fSTORERELEASE(EA, TYPE) \ + do { \ + fV_AL_CHECK(EA, fVECSIZE() - 1); \ + } while (0) +#ifdef QEMU_GENERATE +#define fLOADMMV(EA, DST) gen_vreg_load(ctx, DST##_off, EA, true) +#endif +#ifdef QEMU_GENERATE +#define fLOADMMVU(EA, DST) gen_vreg_load(ctx, DST##_off, EA, false) +#endif +#ifdef QEMU_GENERATE +#define fSTOREMMV(EA, SRC) \ + gen_vreg_store(ctx, insn, pkt, EA, SRC##_off, insn->slot, true) +#endif +#ifdef QEMU_GENERATE +#define fSTOREMMVQ(EA, SRC, MASK) \ + gen_vreg_masked_store(ctx, EA, SRC##_off, MASK##_off, insn->slot, false) +#endif +#ifdef QEMU_GENERATE +#define fSTOREMMVNQ(EA, SRC, MASK) \ + gen_vreg_masked_store(ctx, EA, SRC##_off, MASK##_off, insn->slot, true) +#endif +#ifdef QEMU_GENERATE +#define fSTOREMMVU(EA, SRC) \ + gen_vreg_store(ctx, insn, pkt, EA, SRC##_off, insn->slot, false) +#endif +#define fVFOREACH(WIDTH, VAR) for (VAR = 0; VAR < fVELEM(WIDTH); VAR++) +#define fVARRAY_ELEMENT_ACCESS(ARRAY, TYPE, INDEX) \ + ARRAY.v[(INDEX) / (fVECSIZE() / (sizeof(ARRAY.TYPE[0])))].TYPE[(INDEX) % \ + (fVECSIZE() / (sizeof(ARRAY.TYPE[0])))] + +#define fVSATDW(U, V) fVSATW(((((long long)U) << 32) | fZXTN(32, 64, V))) +#define fVASL_SATHI(U, V) fVSATW(((U) << 1) | ((V) >> 31)) +#define fVUADDSAT(WIDTH, U, V) \ + fVSATUN(WIDTH, fZXTN(WIDTH, 2 * WIDTH, U) + fZXTN(WIDTH, 2 * WIDTH, V)) +#define fVSADDSAT(WIDTH, U, V) \ + fVSATN(WIDTH, fSXTN(WIDTH, 2 * WIDTH, U) + fSXTN(WIDTH, 2 * WIDTH, V)) +#define fVUSUBSAT(WIDTH, U, V) \ + fVSATUN(WIDTH, fZXTN(WIDTH, 2 * WIDTH, U) - fZXTN(WIDTH, 2 * WIDTH, V)) +#define fVSSUBSAT(WIDTH, U, V) \ + fVSATN(WIDTH, fSXTN(WIDTH, 2 * WIDTH, U) - fSXTN(WIDTH, 2 * WIDTH, V)) +#define fVAVGU(WIDTH, U, V) \ + ((fZXTN(WIDTH, 2 * WIDTH, U) + fZXTN(WIDTH, 2 * WIDTH, V)) >> 1) +#define fVAVGURND(WIDTH, U, V) \ + ((fZXTN(WIDTH, 2 * WIDTH, U) + fZXTN(WIDTH, 2 * WIDTH, V) + 1) >> 1) +#define fVNAVGU(WIDTH, U, V) \ + ((fZXTN(WIDTH, 2 * WIDTH, U) - fZXTN(WIDTH, 2 * WIDTH, V)) >> 1) +#define fVNAVGURNDSAT(WIDTH, U, V) \ + fVSATUN(WIDTH, ((fZXTN(WIDTH, 2 * WIDTH, U) - \ + fZXTN(WIDTH, 2 * WIDTH, V) + 1) >> 1)) +#define fVAVGS(WIDTH, U, V) \ + ((fSXTN(WIDTH, 2 * WIDTH, U) + fSXTN(WIDTH, 2 * WIDTH, V)) >> 1) +#define fVAVGSRND(WIDTH, U, V) \ + ((fSXTN(WIDTH, 2 * WIDTH, 
U) + fSXTN(WIDTH, 2 * WIDTH, V) + 1) >> 1) +#define fVNAVGS(WIDTH, U, V) \ + ((fSXTN(WIDTH, 2 * WIDTH, U) - fSXTN(WIDTH, 2 * WIDTH, V)) >> 1) +#define fVNAVGSRND(WIDTH, U, V) \ + ((fSXTN(WIDTH, 2 * WIDTH, U) - fSXTN(WIDTH, 2 * WIDTH, V) + 1) >> 1) +#define fVNAVGSRNDSAT(WIDTH, U, V) \ + fVSATN(WIDTH, ((fSXTN(WIDTH, 2 * WIDTH, U) - \ + fSXTN(WIDTH, 2 * WIDTH, V) + 1) >> 1)) +#define fVNOROUND(VAL, SHAMT) VAL +#define fVNOSAT(VAL) VAL +#define fVROUND(VAL, SHAMT) \ + ((VAL) + (((SHAMT) > 0) ? (1LL << ((SHAMT) - 1)) : 0)) +#define fCARRY_FROM_ADD32(A, B, C) \ + (((fZXTN(32, 64, A) + fZXTN(32, 64, B) + C) >> 32) & 1) +#define fUARCH_NOTE_PUMP_4X() +#define fUARCH_NOTE_PUMP_2X() + +#define IV1DEAD() +#endif diff --git a/target/hexagon/mmvec/mmvec.h b/target/hexagon/mmvec/mmvec.h new file mode 100644 index 0000000000..52d470709c --- /dev/null +++ b/target/hexagon/mmvec/mmvec.h @@ -0,0 +1,82 @@ +/* + * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ + +#ifndef HEXAGON_MMVEC_H +#define HEXAGON_MMVEC_H + +#define MAX_VEC_SIZE_LOGBYTES 7 +#define MAX_VEC_SIZE_BYTES (1 << MAX_VEC_SIZE_LOGBYTES) + +#define NUM_VREGS 32 +#define NUM_QREGS 4 + +typedef uint32_t VRegMask; /* at least NUM_VREGS bits */ +typedef uint32_t QRegMask; /* at least NUM_QREGS bits */ + +#define VECTOR_SIZE_BYTE (fVECSIZE()) + +typedef union { + uint64_t ud[MAX_VEC_SIZE_BYTES / 8]; + int64_t d[MAX_VEC_SIZE_BYTES / 8]; + uint32_t uw[MAX_VEC_SIZE_BYTES / 4]; + int32_t w[MAX_VEC_SIZE_BYTES / 4]; + uint16_t uh[MAX_VEC_SIZE_BYTES / 2]; + int16_t h[MAX_VEC_SIZE_BYTES / 2]; + uint8_t ub[MAX_VEC_SIZE_BYTES / 1]; + int8_t b[MAX_VEC_SIZE_BYTES / 1]; +} MMVector; + +typedef union { + uint64_t ud[2 * MAX_VEC_SIZE_BYTES / 8]; + int64_t d[2 * MAX_VEC_SIZE_BYTES / 8]; + uint32_t uw[2 * MAX_VEC_SIZE_BYTES / 4]; + int32_t w[2 * MAX_VEC_SIZE_BYTES / 4]; + uint16_t uh[2 * MAX_VEC_SIZE_BYTES / 2]; + int16_t h[2 * MAX_VEC_SIZE_BYTES / 2]; + uint8_t ub[2 * MAX_VEC_SIZE_BYTES / 1]; + int8_t b[2 * MAX_VEC_SIZE_BYTES / 1]; + MMVector v[2]; +} MMVectorPair; + +typedef union { + uint64_t ud[MAX_VEC_SIZE_BYTES / 8 / 8]; + int64_t d[MAX_VEC_SIZE_BYTES / 8 / 8]; + uint32_t uw[MAX_VEC_SIZE_BYTES / 4 / 8]; + int32_t w[MAX_VEC_SIZE_BYTES / 4 / 8]; + uint16_t uh[MAX_VEC_SIZE_BYTES / 2 / 8]; + int16_t h[MAX_VEC_SIZE_BYTES / 2 / 8]; + uint8_t ub[MAX_VEC_SIZE_BYTES / 1 / 8]; + int8_t b[MAX_VEC_SIZE_BYTES / 1 / 8]; +} MMQReg; + +typedef struct { + MMVector data; + DECLARE_BITMAP(mask, MAX_VEC_SIZE_BYTES); + target_ulong va[MAX_VEC_SIZE_BYTES]; + bool op; + int op_size; +} VTCMStoreLog; + + +/* Types of vector register assignment */ +typedef enum { + EXT_DFL, /* Default */ + EXT_NEW, /* New - value used in the same packet */ + EXT_TMP /* Temp - value used but not stored to register */ +} VRegWriteType; + +#endif diff --git a/target/hexagon/mmvec/system_ext_mmvec.c b/target/hexagon/mmvec/system_ext_mmvec.c new file 
mode 100644 index 0000000000..8351f2cc01 --- /dev/null +++ b/target/hexagon/mmvec/system_ext_mmvec.c @@ -0,0 +1,47 @@ +/* + * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ + +#include "qemu/osdep.h" +#include "cpu.h" +#include "mmvec/system_ext_mmvec.h" + +void mem_gather_store(CPUHexagonState *env, target_ulong vaddr, int slot) +{ + size_t size = sizeof(MMVector); + + env->vstore_pending[slot] = 1; + env->vstore[slot].va = vaddr; + env->vstore[slot].size = size; + memcpy(&env->vstore[slot].data.ub[0], &env->tmp_VRegs[0], size); + + /* On a gather store, overwrite the store mask to emulate dropped gathers */ + bitmap_copy(env->vstore[slot].mask, env->vtcm_log.mask, size); +} + +void mem_vector_scatter_init(CPUHexagonState *env) +{ + bitmap_zero(env->vtcm_log.mask, MAX_VEC_SIZE_BYTES); + + env->vtcm_pending = true; + env->vtcm_log.op = false; + env->vtcm_log.op_size = 0; +} + +void mem_vector_gather_init(CPUHexagonState *env) +{ + bitmap_zero(env->vtcm_log.mask, MAX_VEC_SIZE_BYTES); +} diff --git a/target/hexagon/mmvec/system_ext_mmvec.h b/target/hexagon/mmvec/system_ext_mmvec.h new file mode 100644 index 0000000000..bcefbffdf2 --- /dev/null +++ b/target/hexagon/mmvec/system_ext_mmvec.h @@ -0,0 +1,25 @@ +/* + * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . 
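A rough sketch of how the scatter helpers above fit together; the ordering is inferred from this patch, and the names value and byte_idx are invented:

    mem_vector_scatter_init(env);            /* clear vtcm_log.mask, set vtcm_pending */
    LOG_VTCM_BYTE(va, 1, value, byte_idx);   /* stage one byte of the scatter         */
    /* at packet commit, the generated code calls gen_helper_commit_hvx_stores(),
     * which replays the staged bytes (or the += read-modify-write for
     * scatter-accumulate operations). */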
+ */ + +#ifndef HEXAGON_SYSTEM_EXT_MMVEC_H +#define HEXAGON_SYSTEM_EXT_MMVEC_H + +void mem_gather_store(CPUHexagonState *env, target_ulong vaddr, int slot); +void mem_vector_scatter_init(CPUHexagonState *env); +void mem_vector_gather_init(CPUHexagonState *env); + +#endif diff --git a/target/hexagon/op_helper.c b/target/hexagon/op_helper.c index 61d5cde939..057baf9a48 100644 --- a/target/hexagon/op_helper.c +++ b/target/hexagon/op_helper.c @@ -27,6 +27,8 @@ #include "arch.h" #include "hex_arch_types.h" #include "fma_emu.h" +#include "mmvec/mmvec.h" +#include "mmvec/macros.h" #define SF_BIAS 127 #define SF_MANTBITS 23 @@ -164,6 +166,57 @@ void HELPER(commit_store)(CPUHexagonState *env, int slot_num) } } +void HELPER(gather_store)(CPUHexagonState *env, uint32_t addr, int slot) +{ + mem_gather_store(env, addr, slot); +} + +void HELPER(commit_hvx_stores)(CPUHexagonState *env) +{ + uintptr_t ra = GETPC(); + int i; + + /* Normal (possibly masked) vector store */ + for (i = 0; i < VSTORES_MAX; i++) { + if (env->vstore_pending[i]) { + env->vstore_pending[i] = 0; + target_ulong va = env->vstore[i].va; + int size = env->vstore[i].size; + for (int j = 0; j < size; j++) { + if (test_bit(j, env->vstore[i].mask)) { + cpu_stb_data_ra(env, va + j, env->vstore[i].data.ub[j], ra); + } + } + } + } + + /* Scatter store */ + if (env->vtcm_pending) { + env->vtcm_pending = false; + if (env->vtcm_log.op) { + /* Need to perform the scatter read/modify/write at commit time */ + if (env->vtcm_log.op_size == 2) { + SCATTER_OP_WRITE_TO_MEM(uint16_t); + } else if (env->vtcm_log.op_size == 4) { + /* Word Scatter += */ + SCATTER_OP_WRITE_TO_MEM(uint32_t); + } else { + g_assert_not_reached(); + } + } else { + for (i = 0; i < sizeof(MMVector); i++) { + if (test_bit(i, env->vtcm_log.mask)) { + cpu_stb_data_ra(env, env->vtcm_log.va[i], + env->vtcm_log.data.ub[i], ra); + clear_bit(i, env->vtcm_log.mask); + env->vtcm_log.data.ub[i] = 0; + } + + } + } + } +} + static void print_store(CPUHexagonState *env, int slot) { if (!(env->slot_cancelled & (1 << slot))) { @@ -242,9 +295,10 @@ void HELPER(debug_commit_end)(CPUHexagonState *env, int has_st0, int has_st1) HEX_DEBUG_LOG("Next PC = " TARGET_FMT_lx "\n", env->next_PC); HEX_DEBUG_LOG("Exec counters: pkt = " TARGET_FMT_lx ", insn = " TARGET_FMT_lx - "\n", + ", hvx = " TARGET_FMT_lx "\n", env->gpr[HEX_REG_QEMU_PKT_CNT], - env->gpr[HEX_REG_QEMU_INSN_CNT]); + env->gpr[HEX_REG_QEMU_INSN_CNT], + env->gpr[HEX_REG_QEMU_HVX_CNT]); } @@ -377,6 +431,81 @@ int32_t HELPER(vacsh_pred)(CPUHexagonState *env, return PeV; } +static void probe_store(CPUHexagonState *env, int slot, int mmu_idx) +{ + if (!(env->slot_cancelled & (1 << slot))) { + size1u_t width = env->mem_log_stores[slot].width; + target_ulong va = env->mem_log_stores[slot].va; + uintptr_t ra = GETPC(); + probe_write(env, va, width, mmu_idx, ra); + } +} + +/* Called during packet commit when there are two scalar stores */ +void HELPER(probe_pkt_scalar_store_s0)(CPUHexagonState *env, int mmu_idx) +{ + probe_store(env, 0, mmu_idx); +} + +void HELPER(probe_hvx_stores)(CPUHexagonState *env, int mmu_idx) +{ + uintptr_t retaddr = GETPC(); + int i; + + /* Normal (possibly masked) vector store */ + for (i = 0; i < VSTORES_MAX; i++) { + if (env->vstore_pending[i]) { + target_ulong va = env->vstore[i].va; + int size = env->vstore[i].size; + for (int j = 0; j < size; j++) { + if (test_bit(j, env->vstore[i].mask)) { + probe_write(env, va + j, 1, mmu_idx, retaddr); + } + } + } + } + + /* Scatter store */ + if (env->vtcm_pending) { + if 
(env->vtcm_log.op) { + /* Need to perform the scatter read/modify/write at commit time */ + if (env->vtcm_log.op_size == 2) { + SCATTER_OP_PROBE_MEM(size2u_t, mmu_idx, retaddr); + } else if (env->vtcm_log.op_size == 4) { + /* Word Scatter += */ + SCATTER_OP_PROBE_MEM(size4u_t, mmu_idx, retaddr); + } else { + g_assert_not_reached(); + } + } else { + for (int i = 0; i < sizeof(MMVector); i++) { + if (test_bit(i, env->vtcm_log.mask)) { + probe_write(env, env->vtcm_log.va[i], 1, mmu_idx, retaddr); + } + + } + } + } +} + +void HELPER(probe_pkt_scalar_hvx_stores)(CPUHexagonState *env, int mask, + int mmu_idx) +{ + bool has_st0 = (mask >> 0) & 1; + bool has_st1 = (mask >> 1) & 1; + bool has_hvx_stores = (mask >> 2) & 1; + + if (has_st0) { + probe_store(env, 0, mmu_idx); + } + if (has_st1) { + probe_store(env, 1, mmu_idx); + } + if (has_hvx_stores) { + HELPER(probe_hvx_stores)(env, mmu_idx); + } +} + /* * mem_noshuf * Section 5.5 of the Hexagon V67 Programmer's Reference Manual @@ -1165,6 +1294,171 @@ float64 HELPER(dfmpyhh)(CPUHexagonState *env, float64 RxxV, return RxxV; } +/* Histogram instructions */ + +void HELPER(vhist)(CPUHexagonState *env) +{ + MMVector *input = &env->tmp_VRegs[0]; + + for (int lane = 0; lane < 8; lane++) { + for (int i = 0; i < sizeof(MMVector) / 8; ++i) { + unsigned char value = input->ub[(sizeof(MMVector) / 8) * lane + i]; + unsigned char regno = value >> 3; + unsigned char element = value & 7; + + env->VRegs[regno].uh[(sizeof(MMVector) / 16) * lane + element]++; + } + } +} + +void HELPER(vhistq)(CPUHexagonState *env) +{ + MMVector *input = &env->tmp_VRegs[0]; + + for (int lane = 0; lane < 8; lane++) { + for (int i = 0; i < sizeof(MMVector) / 8; ++i) { + unsigned char value = input->ub[(sizeof(MMVector) / 8) * lane + i]; + unsigned char regno = value >> 3; + unsigned char element = value & 7; + + if (fGETQBIT(env->qtmp, sizeof(MMVector) / 8 * lane + i)) { + env->VRegs[regno].uh[ + (sizeof(MMVector) / 16) * lane + element]++; + } + } + } +} + +void HELPER(vwhist256)(CPUHexagonState *env) +{ + MMVector *input = &env->tmp_VRegs[0]; + + for (int i = 0; i < (sizeof(MMVector) / 2); i++) { + unsigned int bucket = fGETUBYTE(0, input->h[i]); + unsigned int weight = fGETUBYTE(1, input->h[i]); + unsigned int vindex = (bucket >> 3) & 0x1F; + unsigned int elindex = ((i >> 0) & (~7)) | ((bucket >> 0) & 7); + + env->VRegs[vindex].uh[elindex] = + env->VRegs[vindex].uh[elindex] + weight; + } +} + +void HELPER(vwhist256q)(CPUHexagonState *env) +{ + MMVector *input = &env->tmp_VRegs[0]; + + for (int i = 0; i < (sizeof(MMVector) / 2); i++) { + unsigned int bucket = fGETUBYTE(0, input->h[i]); + unsigned int weight = fGETUBYTE(1, input->h[i]); + unsigned int vindex = (bucket >> 3) & 0x1F; + unsigned int elindex = ((i >> 0) & (~7)) | ((bucket >> 0) & 7); + + if (fGETQBIT(env->qtmp, 2 * i)) { + env->VRegs[vindex].uh[elindex] = + env->VRegs[vindex].uh[elindex] + weight; + } + } +} + +void HELPER(vwhist256_sat)(CPUHexagonState *env) +{ + MMVector *input = &env->tmp_VRegs[0]; + + for (int i = 0; i < (sizeof(MMVector) / 2); i++) { + unsigned int bucket = fGETUBYTE(0, input->h[i]); + unsigned int weight = fGETUBYTE(1, input->h[i]); + unsigned int vindex = (bucket >> 3) & 0x1F; + unsigned int elindex = ((i >> 0) & (~7)) | ((bucket >> 0) & 7); + + env->VRegs[vindex].uh[elindex] = + fVSATUH(env->VRegs[vindex].uh[elindex] + weight); + } +} + +void HELPER(vwhist256q_sat)(CPUHexagonState *env) +{ + MMVector *input = &env->tmp_VRegs[0]; + + for (int i = 0; i < (sizeof(MMVector) / 2); i++) { + unsigned int 
bucket = fGETUBYTE(0, input->h[i]); + unsigned int weight = fGETUBYTE(1, input->h[i]); + unsigned int vindex = (bucket >> 3) & 0x1F; + unsigned int elindex = ((i >> 0) & (~7)) | ((bucket >> 0) & 7); + + if (fGETQBIT(env->qtmp, 2 * i)) { + env->VRegs[vindex].uh[elindex] = + fVSATUH(env->VRegs[vindex].uh[elindex] + weight); + } + } +} + +void HELPER(vwhist128)(CPUHexagonState *env) +{ + MMVector *input = &env->tmp_VRegs[0]; + + for (int i = 0; i < (sizeof(MMVector) / 2); i++) { + unsigned int bucket = fGETUBYTE(0, input->h[i]); + unsigned int weight = fGETUBYTE(1, input->h[i]); + unsigned int vindex = (bucket >> 3) & 0x1F; + unsigned int elindex = ((i >> 1) & (~3)) | ((bucket >> 1) & 3); + + env->VRegs[vindex].uw[elindex] = + env->VRegs[vindex].uw[elindex] + weight; + } +} + +void HELPER(vwhist128q)(CPUHexagonState *env) +{ + MMVector *input = &env->tmp_VRegs[0]; + + for (int i = 0; i < (sizeof(MMVector) / 2); i++) { + unsigned int bucket = fGETUBYTE(0, input->h[i]); + unsigned int weight = fGETUBYTE(1, input->h[i]); + unsigned int vindex = (bucket >> 3) & 0x1F; + unsigned int elindex = ((i >> 1) & (~3)) | ((bucket >> 1) & 3); + + if (fGETQBIT(env->qtmp, 2 * i)) { + env->VRegs[vindex].uw[elindex] = + env->VRegs[vindex].uw[elindex] + weight; + } + } +} + +void HELPER(vwhist128m)(CPUHexagonState *env, int32_t uiV) +{ + MMVector *input = &env->tmp_VRegs[0]; + + for (int i = 0; i < (sizeof(MMVector) / 2); i++) { + unsigned int bucket = fGETUBYTE(0, input->h[i]); + unsigned int weight = fGETUBYTE(1, input->h[i]); + unsigned int vindex = (bucket >> 3) & 0x1F; + unsigned int elindex = ((i >> 1) & (~3)) | ((bucket >> 1) & 3); + + if ((bucket & 1) == uiV) { + env->VRegs[vindex].uw[elindex] = + env->VRegs[vindex].uw[elindex] + weight; + } + } +} + +void HELPER(vwhist128qm)(CPUHexagonState *env, int32_t uiV) +{ + MMVector *input = &env->tmp_VRegs[0]; + + for (int i = 0; i < (sizeof(MMVector) / 2); i++) { + unsigned int bucket = fGETUBYTE(0, input->h[i]); + unsigned int weight = fGETUBYTE(1, input->h[i]); + unsigned int vindex = (bucket >> 3) & 0x1F; + unsigned int elindex = ((i >> 1) & (~3)) | ((bucket >> 1) & 3); + + if (((bucket & 1) == uiV) && fGETQBIT(env->qtmp, 2 * i)) { + env->VRegs[vindex].uw[elindex] = + env->VRegs[vindex].uw[elindex] + weight; + } + } +} + static void cancel_slot(CPUHexagonState *env, uint32_t slot) { HEX_DEBUG_LOG("Slot %d cancelled\n", slot); diff --git a/target/hexagon/translate.c b/target/hexagon/translate.c index 6fb4e6853c..b6f541ecb2 100644 --- a/target/hexagon/translate.c +++ b/target/hexagon/translate.c @@ -19,6 +19,7 @@ #include "qemu/osdep.h" #include "cpu.h" #include "tcg/tcg-op.h" +#include "tcg/tcg-op-gvec.h" #include "exec/cpu_ldst.h" #include "exec/log.h" #include "internal.h" @@ -47,16 +48,63 @@ TCGv hex_dczero_addr; TCGv hex_llsc_addr; TCGv hex_llsc_val; TCGv_i64 hex_llsc_val_i64; +TCGv hex_VRegs_updated; +TCGv hex_QRegs_updated; +TCGv hex_vstore_addr[VSTORES_MAX]; +TCGv hex_vstore_size[VSTORES_MAX]; +TCGv hex_vstore_pending[VSTORES_MAX]; static const char * const hexagon_prednames[] = { "p0", "p1", "p2", "p3" }; +intptr_t ctx_future_vreg_off(DisasContext *ctx, int regnum, + int num, bool alloc_ok) +{ + intptr_t offset; + + /* See if it is already allocated */ + for (int i = 0; i < ctx->future_vregs_idx; i++) { + if (ctx->future_vregs_num[i] == regnum) { + return offsetof(CPUHexagonState, future_VRegs[i]); + } + } + + g_assert(alloc_ok); + offset = offsetof(CPUHexagonState, future_VRegs[ctx->future_vregs_idx]); + for (int i = 0; i < num; i++) { + 
ctx->future_vregs_num[ctx->future_vregs_idx + i] = regnum++; + } + ctx->future_vregs_idx += num; + g_assert(ctx->future_vregs_idx <= VECTOR_TEMPS_MAX); + return offset; +} + +intptr_t ctx_tmp_vreg_off(DisasContext *ctx, int regnum, + int num, bool alloc_ok) +{ + intptr_t offset; + + /* See if it is already allocated */ + for (int i = 0; i < ctx->tmp_vregs_idx; i++) { + if (ctx->tmp_vregs_num[i] == regnum) { + return offsetof(CPUHexagonState, tmp_VRegs[i]); + } + } + + g_assert(alloc_ok); + offset = offsetof(CPUHexagonState, tmp_VRegs[ctx->tmp_vregs_idx]); + for (int i = 0; i < num; i++) { + ctx->tmp_vregs_num[ctx->tmp_vregs_idx + i] = regnum++; + } + ctx->tmp_vregs_idx += num; + g_assert(ctx->tmp_vregs_idx <= VECTOR_TEMPS_MAX); + return offset; +} + static void gen_exception_raw(int excp) { - TCGv_i32 helper_tmp = tcg_const_i32(excp); - gen_helper_raise_exception(cpu_env, helper_tmp); - tcg_temp_free_i32(helper_tmp); + gen_helper_raise_exception(cpu_env, tcg_constant_i32(excp)); } static void gen_exec_counters(DisasContext *ctx) @@ -65,17 +113,15 @@ static void gen_exec_counters(DisasContext *ctx) hex_gpr[HEX_REG_QEMU_PKT_CNT], ctx->num_packets); tcg_gen_addi_tl(hex_gpr[HEX_REG_QEMU_INSN_CNT], hex_gpr[HEX_REG_QEMU_INSN_CNT], ctx->num_insns); + tcg_gen_addi_tl(hex_gpr[HEX_REG_QEMU_HVX_CNT], + hex_gpr[HEX_REG_QEMU_HVX_CNT], ctx->num_hvx_insns); } static void gen_end_tb(DisasContext *ctx) { gen_exec_counters(ctx); tcg_gen_mov_tl(hex_gpr[HEX_REG_PC], hex_next_PC); - if (ctx->base.singlestep_enabled) { - gen_exception_raw(EXCP_DEBUG); - } else { - tcg_gen_exit_tb(NULL, 0); - } + tcg_gen_exit_tb(NULL, 0); ctx->base.is_jmp = DISAS_NORETURN; } @@ -173,11 +219,19 @@ static void gen_start_packet(DisasContext *ctx, Packet *pkt) bitmap_zero(ctx->regs_written, TOTAL_PER_THREAD_REGS); ctx->preg_log_idx = 0; bitmap_zero(ctx->pregs_written, NUM_PREGS); + ctx->future_vregs_idx = 0; + ctx->tmp_vregs_idx = 0; + ctx->vreg_log_idx = 0; + bitmap_zero(ctx->vregs_updated_tmp, NUM_VREGS); + bitmap_zero(ctx->vregs_updated, NUM_VREGS); + bitmap_zero(ctx->vregs_select, NUM_VREGS); + ctx->qreg_log_idx = 0; for (i = 0; i < STORES_MAX; i++) { ctx->store_width[i] = 0; } tcg_gen_movi_tl(hex_pkt_has_store_s1, pkt->pkt_has_store_s1); ctx->s1_store_processed = false; + ctx->pre_commit = true; if (HEX_DEBUG) { /* Handy place to set a breakpoint before the packet executes */ @@ -199,6 +253,26 @@ static void gen_start_packet(DisasContext *ctx, Packet *pkt) if (need_pred_written(pkt)) { tcg_gen_movi_tl(hex_pred_written, 0); } + + if (pkt->pkt_has_hvx) { + tcg_gen_movi_tl(hex_VRegs_updated, 0); + tcg_gen_movi_tl(hex_QRegs_updated, 0); + } +} + +bool is_gather_store_insn(Insn *insn, Packet *pkt) +{ + if (GET_ATTRIB(insn->opcode, A_CVI_NEW) && + insn->new_value_producer_slot == 1) { + /* Look for gather instruction */ + for (int i = 0; i < pkt->num_insns; i++) { + Insn *in = &pkt->insn[i]; + if (GET_ATTRIB(in->opcode, A_CVI_GATHER) && in->slot == 1) { + return true; + } + } + } + return false; } /* @@ -210,7 +284,12 @@ static void mark_implicit_reg_write(DisasContext *ctx, Insn *insn, int attrib, int rnum) { if (GET_ATTRIB(insn->opcode, attrib)) { - bool is_predicated = GET_ATTRIB(insn->opcode, A_CONDEXEC); + /* + * USR is used to set overflow and FP exceptions, + * so treat it as conditional + */ + bool is_predicated = GET_ATTRIB(insn->opcode, A_CONDEXEC) || + rnum == HEX_REG_USR; if (is_predicated && !is_preloaded(ctx, rnum)) { tcg_gen_mov_tl(hex_new_value[rnum], hex_gpr[rnum]); } @@ -236,6 +315,8 @@ static void 
mark_implicit_reg_writes(DisasContext *ctx, Insn *insn) mark_implicit_reg_write(ctx, insn, A_IMPLICIT_WRITES_SA0, HEX_REG_SA0); mark_implicit_reg_write(ctx, insn, A_IMPLICIT_WRITES_LC1, HEX_REG_LC1); mark_implicit_reg_write(ctx, insn, A_IMPLICIT_WRITES_SA1, HEX_REG_SA1); + mark_implicit_reg_write(ctx, insn, A_IMPLICIT_WRITES_USR, HEX_REG_USR); + mark_implicit_reg_write(ctx, insn, A_FPOP, HEX_REG_USR); } static void mark_implicit_pred_writes(DisasContext *ctx, Insn *insn) @@ -288,7 +369,7 @@ static void gen_pred_writes(DisasContext *ctx, Packet *pkt) * write of the predicates. */ if (pkt->pkt_has_endloop) { - TCGv zero = tcg_const_tl(0); + TCGv zero = tcg_constant_tl(0); TCGv pred_written = tcg_temp_new(); for (i = 0; i < ctx->preg_log_idx; i++) { int pred_num = ctx->preg_log[i]; @@ -299,7 +380,6 @@ static void gen_pred_writes(DisasContext *ctx, Packet *pkt) hex_new_pred_value[pred_num], hex_pred[pred_num]); } - tcg_temp_free(zero); tcg_temp_free(pred_written); } else { for (i = 0; i < ctx->preg_log_idx; i++) { @@ -317,11 +397,9 @@ static void gen_pred_writes(DisasContext *ctx, Packet *pkt) static void gen_check_store_width(DisasContext *ctx, int slot_num) { if (HEX_DEBUG) { - TCGv slot = tcg_const_tl(slot_num); - TCGv check = tcg_const_tl(ctx->store_width[slot_num]); + TCGv slot = tcg_constant_tl(slot_num); + TCGv check = tcg_constant_tl(ctx->store_width[slot_num]); gen_helper_debug_check_store_width(cpu_env, slot, check); - tcg_temp_free(slot); - tcg_temp_free(check); } } @@ -403,9 +481,8 @@ void process_store(DisasContext *ctx, Packet *pkt, int slot_num) * TCG generation time, we'll use a helper to * avoid branching based on the width at runtime. */ - TCGv slot = tcg_const_tl(slot_num); + TCGv slot = tcg_constant_tl(slot_num); gen_helper_commit_store(cpu_env, slot); - tcg_temp_free(slot); } } tcg_temp_free(address); @@ -419,7 +496,7 @@ static void process_store_log(DisasContext *ctx, Packet *pkt) { /* * When a packet has two stores, the hardware processes - * slot 1 and then slot 2. This will be important when + * slot 1 and then slot 0. This will be important when * the memory accesses overlap. 
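For instance, with a hypothetical pair of overlapping stores (slot 1 writes one byte at address A, slot 0 writes a word covering A), committing in the order below lets the slot 0 word win for the overlapping byte, which is what the comment above is pointing at:

    process_store(ctx, pkt, 1);   /* slot 1 byte store committed first  */
    process_store(ctx, pkt, 0);   /* slot 0 word store committed second */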
*/ if (pkt->pkt_has_store_s1 && !pkt->pkt_has_dczeroa) { @@ -436,7 +513,7 @@ static void process_dczeroa(DisasContext *ctx, Packet *pkt) if (pkt->pkt_has_dczeroa) { /* Store 32 bytes of zero starting at (addr & ~0x1f) */ TCGv addr = tcg_temp_new(); - TCGv_i64 zero = tcg_const_i64(0); + TCGv_i64 zero = tcg_constant_i64(0); tcg_gen_andi_tl(addr, hex_dczero_addr, ~0x1f); tcg_gen_qemu_st64(zero, addr, ctx->mem_idx); @@ -448,7 +525,93 @@ static void process_dczeroa(DisasContext *ctx, Packet *pkt) tcg_gen_qemu_st64(zero, addr, ctx->mem_idx); tcg_temp_free(addr); - tcg_temp_free_i64(zero); + } +} + +static bool pkt_has_hvx_store(Packet *pkt) +{ + int i; + for (i = 0; i < pkt->num_insns; i++) { + int opcode = pkt->insn[i].opcode; + if (GET_ATTRIB(opcode, A_CVI) && GET_ATTRIB(opcode, A_STORE)) { + return true; + } + } + return false; +} + +static void gen_commit_hvx(DisasContext *ctx, Packet *pkt) +{ + int i; + + /* + * for (i = 0; i < ctx->vreg_log_idx; i++) { + * int rnum = ctx->vreg_log[i]; + * if (ctx->vreg_is_predicated[i]) { + * if (env->VRegs_updated & (1 << rnum)) { + * env->VRegs[rnum] = env->future_VRegs[rnum]; + * } + * } else { + * env->VRegs[rnum] = env->future_VRegs[rnum]; + * } + * } + */ + for (i = 0; i < ctx->vreg_log_idx; i++) { + int rnum = ctx->vreg_log[i]; + bool is_predicated = ctx->vreg_is_predicated[i]; + intptr_t dstoff = offsetof(CPUHexagonState, VRegs[rnum]); + intptr_t srcoff = ctx_future_vreg_off(ctx, rnum, 1, false); + size_t size = sizeof(MMVector); + + if (is_predicated) { + TCGv cmp = tcg_temp_new(); + TCGLabel *label_skip = gen_new_label(); + + tcg_gen_andi_tl(cmp, hex_VRegs_updated, 1 << rnum); + tcg_gen_brcondi_tl(TCG_COND_EQ, cmp, 0, label_skip); + tcg_temp_free(cmp); + tcg_gen_gvec_mov(MO_64, dstoff, srcoff, size, size); + gen_set_label(label_skip); + } else { + tcg_gen_gvec_mov(MO_64, dstoff, srcoff, size, size); + } + } + + /* + * for (i = 0; i < ctx->qreg_log_idx; i++) { + * int rnum = ctx->qreg_log[i]; + * if (ctx->qreg_is_predicated[i]) { + * if (env->QRegs_updated) & (1 << rnum)) { + * env->QRegs[rnum] = env->future_QRegs[rnum]; + * } + * } else { + * env->QRegs[rnum] = env->future_QRegs[rnum]; + * } + * } + */ + for (i = 0; i < ctx->qreg_log_idx; i++) { + int rnum = ctx->qreg_log[i]; + bool is_predicated = ctx->qreg_is_predicated[i]; + intptr_t dstoff = offsetof(CPUHexagonState, QRegs[rnum]); + intptr_t srcoff = offsetof(CPUHexagonState, future_QRegs[rnum]); + size_t size = sizeof(MMQReg); + + if (is_predicated) { + TCGv cmp = tcg_temp_new(); + TCGLabel *label_skip = gen_new_label(); + + tcg_gen_andi_tl(cmp, hex_QRegs_updated, 1 << rnum); + tcg_gen_brcondi_tl(TCG_COND_EQ, cmp, 0, label_skip); + tcg_temp_free(cmp); + tcg_gen_gvec_mov(MO_64, dstoff, srcoff, size, size); + gen_set_label(label_skip); + } else { + tcg_gen_gvec_mov(MO_64, dstoff, srcoff, size, size); + } + } + + if (pkt_has_hvx_store(pkt)) { + gen_helper_commit_hvx_stores(cpu_env); } } @@ -456,6 +619,7 @@ static void update_exec_counters(DisasContext *ctx, Packet *pkt) { int num_insns = pkt->num_insns; int num_real_insns = 0; + int num_hvx_insns = 0; for (int i = 0; i < num_insns; i++) { if (!pkt->insn[i].is_endloop && @@ -463,30 +627,97 @@ static void update_exec_counters(DisasContext *ctx, Packet *pkt) !GET_ATTRIB(pkt->insn[i].opcode, A_IT_NOP)) { num_real_insns++; } + if (GET_ATTRIB(pkt->insn[i].opcode, A_CVI)) { + num_hvx_insns++; + } } ctx->num_packets++; ctx->num_insns += num_real_insns; + ctx->num_hvx_insns += num_hvx_insns; } -static void gen_commit_packet(DisasContext *ctx, Packet 
*pkt) +static void gen_commit_packet(CPUHexagonState *env, DisasContext *ctx, + Packet *pkt) { + /* + * If there is more than one store in a packet, make sure they are all OK + * before proceeding with the rest of the packet commit. + * + * dczeroa has to be the only store operation in the packet, so we go + * ahead and process that first. + * + * When there is an HVX store, there can also be a scalar store in either + * slot 0 or slot1, so we create a mask for the helper to indicate what + * work to do. + * + * When there are two scalar stores, we probe the one in slot 0. + * + * Note that we don't call the probe helper for packets with only one + * store. Therefore, we call process_store_log before anything else + * involved in committing the packet. + */ + bool has_store_s0 = pkt->pkt_has_store_s0; + bool has_store_s1 = (pkt->pkt_has_store_s1 && !ctx->s1_store_processed); + bool has_hvx_store = pkt_has_hvx_store(pkt); + if (pkt->pkt_has_dczeroa) { + /* + * The dczeroa will be the store in slot 0, check that we don't have + * a store in slot 1 or an HVX store. + */ + g_assert(has_store_s0 && !has_store_s1 && !has_hvx_store); + process_dczeroa(ctx, pkt); + } else if (has_hvx_store) { + TCGv mem_idx = tcg_constant_tl(ctx->mem_idx); + + if (!has_store_s0 && !has_store_s1) { + gen_helper_probe_hvx_stores(cpu_env, mem_idx); + } else { + int mask = 0; + TCGv mask_tcgv; + + if (has_store_s0) { + mask |= (1 << 0); + } + if (has_store_s1) { + mask |= (1 << 1); + } + if (has_hvx_store) { + mask |= (1 << 2); + } + mask_tcgv = tcg_constant_tl(mask); + gen_helper_probe_pkt_scalar_hvx_stores(cpu_env, mask_tcgv, mem_idx); + } + } else if (has_store_s0 && has_store_s1) { + /* + * process_store_log will execute the slot 1 store first, + * so we only have to probe the store in slot 0 + */ + TCGv mem_idx = tcg_constant_tl(ctx->mem_idx); + gen_helper_probe_pkt_scalar_store_s0(cpu_env, mem_idx); + } + + process_store_log(ctx, pkt); + gen_reg_writes(ctx); gen_pred_writes(ctx, pkt); - process_store_log(ctx, pkt); - process_dczeroa(ctx, pkt); + if (pkt->pkt_has_hvx) { + gen_commit_hvx(ctx, pkt); + } update_exec_counters(ctx, pkt); if (HEX_DEBUG) { TCGv has_st0 = - tcg_const_tl(pkt->pkt_has_store_s0 && !pkt->pkt_has_dczeroa); + tcg_constant_tl(pkt->pkt_has_store_s0 && !pkt->pkt_has_dczeroa); TCGv has_st1 = - tcg_const_tl(pkt->pkt_has_store_s1 && !pkt->pkt_has_dczeroa); + tcg_constant_tl(pkt->pkt_has_store_s1 && !pkt->pkt_has_dczeroa); /* Handy place to set a breakpoint at the end of execution */ gen_helper_debug_commit_end(cpu_env, has_st0, has_st1); + } - tcg_temp_free(has_st0); - tcg_temp_free(has_st1); + if (pkt->vhist_insn != NULL) { + ctx->pre_commit = false; + pkt->vhist_insn->generate(env, ctx, pkt->vhist_insn, pkt); } if (pkt->pkt_has_cof) { @@ -513,7 +744,7 @@ static void decode_and_translate_packet(CPUHexagonState *env, DisasContext *ctx) for (i = 0; i < pkt.num_insns; i++) { gen_insn(env, ctx, &pkt.insn[i], &pkt); } - gen_commit_packet(ctx, &pkt); + gen_commit_packet(env, ctx, &pkt); ctx->base.pc_next += pkt.encod_pkt_size_in_bytes; } else { gen_exception_end_tb(ctx, HEX_EXCP_INVALID_PACKET); @@ -528,6 +759,7 @@ static void hexagon_tr_init_disas_context(DisasContextBase *dcbase, ctx->mem_idx = MMU_USER_IDX; ctx->num_packets = 0; ctx->num_insns = 0; + ctx->num_hvx_insns = 0; } static void hexagon_tr_tb_start(DisasContextBase *db, CPUState *cpu) @@ -592,11 +824,7 @@ static void hexagon_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu) case DISAS_TOO_MANY: gen_exec_counters(ctx); 
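Worked example of the mask passed to probe_pkt_scalar_hvx_stores() above (values are illustrative): a packet with a slot 0 scalar store plus HVX stores, but no slot 1 store, builds

    int mask = (1 << 0) | (1 << 2);   /* has_store_s0 and has_hvx_stores set */

so the helper probes the slot 0 store and the pending HVX stores before any of them is committed.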
tcg_gen_movi_tl(hex_gpr[HEX_REG_PC], ctx->base.pc_next); - if (ctx->base.singlestep_enabled) { - gen_exception_raw(EXCP_DEBUG); - } else { - tcg_gen_exit_tb(NULL, 0); - } + tcg_gen_exit_tb(NULL, 0); break; case DISAS_NORETURN: break; @@ -636,6 +864,9 @@ static char store_addr_names[STORES_MAX][NAME_LEN]; static char store_width_names[STORES_MAX][NAME_LEN]; static char store_val32_names[STORES_MAX][NAME_LEN]; static char store_val64_names[STORES_MAX][NAME_LEN]; +static char vstore_addr_names[VSTORES_MAX][NAME_LEN]; +static char vstore_size_names[VSTORES_MAX][NAME_LEN]; +static char vstore_pending_names[VSTORES_MAX][NAME_LEN]; void hexagon_translate_init(void) { @@ -698,6 +929,10 @@ void hexagon_translate_init(void) offsetof(CPUHexagonState, llsc_val), "llsc_val"); hex_llsc_val_i64 = tcg_global_mem_new_i64(cpu_env, offsetof(CPUHexagonState, llsc_val_i64), "llsc_val_i64"); + hex_VRegs_updated = tcg_global_mem_new(cpu_env, + offsetof(CPUHexagonState, VRegs_updated), "VRegs_updated"); + hex_QRegs_updated = tcg_global_mem_new(cpu_env, + offsetof(CPUHexagonState, QRegs_updated), "QRegs_updated"); for (i = 0; i < STORES_MAX; i++) { snprintf(store_addr_names[i], NAME_LEN, "store_addr_%d", i); hex_store_addr[i] = tcg_global_mem_new(cpu_env, @@ -719,4 +954,20 @@ void hexagon_translate_init(void) offsetof(CPUHexagonState, mem_log_stores[i].data64), store_val64_names[i]); } + for (int i = 0; i < VSTORES_MAX; i++) { + snprintf(vstore_addr_names[i], NAME_LEN, "vstore_addr_%d", i); + hex_vstore_addr[i] = tcg_global_mem_new(cpu_env, + offsetof(CPUHexagonState, vstore[i].va), + vstore_addr_names[i]); + + snprintf(vstore_size_names[i], NAME_LEN, "vstore_size_%d", i); + hex_vstore_size[i] = tcg_global_mem_new(cpu_env, + offsetof(CPUHexagonState, vstore[i].size), + vstore_size_names[i]); + + snprintf(vstore_pending_names[i], NAME_LEN, "vstore_pending_%d", i); + hex_vstore_pending[i] = tcg_global_mem_new(cpu_env, + offsetof(CPUHexagonState, vstore_pending[i]), + vstore_pending_names[i]); + } } diff --git a/target/hexagon/translate.h b/target/hexagon/translate.h index 703fd1345f..fccfb94340 100644 --- a/target/hexagon/translate.h +++ b/target/hexagon/translate.h @@ -29,6 +29,7 @@ typedef struct DisasContext { uint32_t mem_idx; uint32_t num_packets; uint32_t num_insns; + uint32_t num_hvx_insns; int reg_log[REG_WRITES_MAX]; int reg_log_idx; DECLARE_BITMAP(regs_written, TOTAL_PER_THREAD_REGS); @@ -37,6 +38,20 @@ typedef struct DisasContext { DECLARE_BITMAP(pregs_written, NUM_PREGS); uint8_t store_width[STORES_MAX]; bool s1_store_processed; + int future_vregs_idx; + int future_vregs_num[VECTOR_TEMPS_MAX]; + int tmp_vregs_idx; + int tmp_vregs_num[VECTOR_TEMPS_MAX]; + int vreg_log[NUM_VREGS]; + bool vreg_is_predicated[NUM_VREGS]; + int vreg_log_idx; + DECLARE_BITMAP(vregs_updated_tmp, NUM_VREGS); + DECLARE_BITMAP(vregs_updated, NUM_VREGS); + DECLARE_BITMAP(vregs_select, NUM_VREGS); + int qreg_log[NUM_QREGS]; + bool qreg_is_predicated[NUM_QREGS]; + int qreg_log_idx; + bool pre_commit; } DisasContext; static inline void ctx_log_reg_write(DisasContext *ctx, int rnum) @@ -67,6 +82,46 @@ static inline bool is_preloaded(DisasContext *ctx, int num) return test_bit(num, ctx->regs_written); } +intptr_t ctx_future_vreg_off(DisasContext *ctx, int regnum, + int num, bool alloc_ok); +intptr_t ctx_tmp_vreg_off(DisasContext *ctx, int regnum, + int num, bool alloc_ok); + +static inline void ctx_log_vreg_write(DisasContext *ctx, + int rnum, VRegWriteType type, + bool is_predicated) +{ + if (type != EXT_TMP) { + 
ctx->vreg_log[ctx->vreg_log_idx] = rnum; + ctx->vreg_is_predicated[ctx->vreg_log_idx] = is_predicated; + ctx->vreg_log_idx++; + + set_bit(rnum, ctx->vregs_updated); + } + if (type == EXT_NEW) { + set_bit(rnum, ctx->vregs_select); + } + if (type == EXT_TMP) { + set_bit(rnum, ctx->vregs_updated_tmp); + } +} + +static inline void ctx_log_vreg_write_pair(DisasContext *ctx, + int rnum, VRegWriteType type, + bool is_predicated) +{ + ctx_log_vreg_write(ctx, rnum ^ 0, type, is_predicated); + ctx_log_vreg_write(ctx, rnum ^ 1, type, is_predicated); +} + +static inline void ctx_log_qreg_write(DisasContext *ctx, + int rnum, bool is_predicated) +{ + ctx->qreg_log[ctx->qreg_log_idx] = rnum; + ctx->qreg_is_predicated[ctx->qreg_log_idx] = is_predicated; + ctx->qreg_log_idx++; +} + extern TCGv hex_gpr[TOTAL_PER_THREAD_REGS]; extern TCGv hex_pred[NUM_PREGS]; extern TCGv hex_next_PC; @@ -85,6 +140,12 @@ extern TCGv hex_dczero_addr; extern TCGv hex_llsc_addr; extern TCGv hex_llsc_val; extern TCGv_i64 hex_llsc_val_i64; +extern TCGv hex_VRegs_updated; +extern TCGv hex_QRegs_updated; +extern TCGv hex_vstore_addr[VSTORES_MAX]; +extern TCGv hex_vstore_size[VSTORES_MAX]; +extern TCGv hex_vstore_pending[VSTORES_MAX]; +bool is_gather_store_insn(Insn *insn, Packet *pkt); void process_store(DisasContext *ctx, Packet *pkt, int slot_num); #endif diff --git a/target/hppa/cpu.c b/target/hppa/cpu.c index 89cba9d7a2..23eb254228 100644 --- a/target/hppa/cpu.c +++ b/target/hppa/cpu.c @@ -145,9 +145,9 @@ static const struct SysemuCPUOps hppa_sysemu_ops = { static const struct TCGCPUOps hppa_tcg_ops = { .initialize = hppa_translate_init, .synchronize_from_tb = hppa_cpu_synchronize_from_tb, - .tlb_fill = hppa_cpu_tlb_fill, #ifndef CONFIG_USER_ONLY + .tlb_fill = hppa_cpu_tlb_fill, .cpu_exec_interrupt = hppa_cpu_exec_interrupt, .do_interrupt = hppa_cpu_do_interrupt, .do_unaligned_access = hppa_cpu_do_unaligned_access, diff --git a/target/hppa/cpu.h b/target/hppa/cpu.h index d3cb7a279f..294fd7297f 100644 --- a/target/hppa/cpu.h +++ b/target/hppa/cpu.h @@ -323,10 +323,10 @@ hwaddr hppa_cpu_get_phys_page_debug(CPUState *cs, vaddr addr); int hppa_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg); int hppa_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg); void hppa_cpu_dump_state(CPUState *cs, FILE *f, int); +#ifndef CONFIG_USER_ONLY bool hppa_cpu_tlb_fill(CPUState *cs, vaddr address, int size, MMUAccessType access_type, int mmu_idx, bool probe, uintptr_t retaddr); -#ifndef CONFIG_USER_ONLY void hppa_cpu_do_interrupt(CPUState *cpu); bool hppa_cpu_exec_interrupt(CPUState *cpu, int int_req); int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx, diff --git a/target/hppa/mem_helper.c b/target/hppa/mem_helper.c index afc5b56c3e..bf07445cd1 100644 --- a/target/hppa/mem_helper.c +++ b/target/hppa/mem_helper.c @@ -24,20 +24,6 @@ #include "hw/core/cpu.h" #include "trace.h" -#ifdef CONFIG_USER_ONLY -bool hppa_cpu_tlb_fill(CPUState *cs, vaddr address, int size, - MMUAccessType access_type, int mmu_idx, - bool probe, uintptr_t retaddr) -{ - HPPACPU *cpu = HPPA_CPU(cs); - - /* ??? Test between data page fault and data memory protection trap, - which would affect si_code. */ - cs->exception_index = EXCP_DMP; - cpu->env.cr[CR_IOR] = address; - cpu_loop_exit_restore(cs, retaddr); -} -#else static hppa_tlb_entry *hppa_find_tlb(CPUHPPAState *env, vaddr addr) { int i; @@ -392,4 +378,3 @@ int hppa_artype_for_page(CPUHPPAState *env, target_ulong vaddr) hppa_tlb_entry *ent = hppa_find_tlb(env, vaddr); return ent ? 
ent->ar_type : -1; } -#endif /* CONFIG_USER_ONLY */ diff --git a/target/hppa/meson.build b/target/hppa/meson.build index 8a7ff82efc..021e42a2d0 100644 --- a/target/hppa/meson.build +++ b/target/hppa/meson.build @@ -7,13 +7,15 @@ hppa_ss.add(files( 'gdbstub.c', 'helper.c', 'int_helper.c', - 'mem_helper.c', 'op_helper.c', 'translate.c', )) hppa_softmmu_ss = ss.source_set() -hppa_softmmu_ss.add(files('machine.c')) +hppa_softmmu_ss.add(files( + 'machine.c', + 'mem_helper.c', +)) target_arch += {'hppa': hppa_ss} target_softmmu_arch += {'hppa': hppa_softmmu_ss} diff --git a/target/hppa/translate.c b/target/hppa/translate.c index c3698cf067..3b9744deb4 100644 --- a/target/hppa/translate.c +++ b/target/hppa/translate.c @@ -814,11 +814,7 @@ static void gen_goto_tb(DisasContext *ctx, int which, } else { copy_iaoq_entry(cpu_iaoq_f, f, cpu_iaoq_b); copy_iaoq_entry(cpu_iaoq_b, b, ctx->iaoq_n_var); - if (ctx->base.singlestep_enabled) { - gen_excp_1(EXCP_DEBUG); - } else { - tcg_gen_lookup_and_goto_ptr(); - } + tcg_gen_lookup_and_goto_ptr(); } } @@ -2346,11 +2342,7 @@ static bool do_rfi(DisasContext *ctx, bool rfi_r) gen_helper_rfi(cpu_env); } /* Exit the TB to recognize new interrupts. */ - if (ctx->base.singlestep_enabled) { - gen_excp_1(EXCP_DEBUG); - } else { - tcg_gen_exit_tb(NULL, 0); - } + tcg_gen_exit_tb(NULL, 0); ctx->base.is_jmp = DISAS_NORETURN; return nullify_end(ctx); @@ -4274,10 +4266,9 @@ static void hppa_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs) nullify_save(ctx); /* FALLTHRU */ case DISAS_IAQ_N_UPDATED: - if (ctx->base.singlestep_enabled) { - gen_excp_1(EXCP_DEBUG); - } else if (is_jmp != DISAS_IAQ_N_STALE_EXIT) { + if (is_jmp != DISAS_IAQ_N_STALE_EXIT) { tcg_gen_lookup_and_goto_ptr(); + break; } /* FALLTHRU */ case DISAS_EXIT: diff --git a/target/i386/cpu-dump.c b/target/i386/cpu-dump.c index 02b635a52c..08ac957e99 100644 --- a/target/i386/cpu-dump.c +++ b/target/i386/cpu-dump.c @@ -464,13 +464,13 @@ void x86_cpu_dump_state(CPUState *cs, FILE *f, int flags) snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op); #ifdef TARGET_X86_64 if (env->hflags & HF_CS64_MASK) { - qemu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n", + qemu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%s\n", env->cc_src, env->cc_dst, cc_op_name); } else #endif { - qemu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n", + qemu_fprintf(f, "CCS=%08x CCD=%08x CCO=%s\n", (uint32_t)env->cc_src, (uint32_t)env->cc_dst, cc_op_name); } diff --git a/target/i386/cpu.c b/target/i386/cpu.c index 6b029f1bdf..aa9e636800 100644 --- a/target/i386/cpu.c +++ b/target/i386/cpu.c @@ -26,7 +26,8 @@ #include "sysemu/reset.h" #include "sysemu/hvf.h" #include "kvm/kvm_i386.h" -#include "sev_i386.h" +#include "sev.h" +#include "qapi/error.h" #include "qapi/qapi-visit-machine.h" #include "qapi/qmp/qerror.h" #include "qapi/qapi-commands-machine-target.h" @@ -36,6 +37,7 @@ #ifndef CONFIG_USER_ONLY #include "exec/address-spaces.h" #include "hw/boards.h" +#include "hw/i386/sgx-epc.h" #endif #include "disas/capstone.h" @@ -654,6 +656,9 @@ void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1, /* missing: CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */ #define TCG_14_0_ECX_FEATURES 0 +#define TCG_SGX_12_0_EAX_FEATURES 0 +#define TCG_SGX_12_0_EBX_FEATURES 0 +#define TCG_SGX_12_1_EAX_FEATURES 0 FeatureWordInfo feature_word_info[FEATURE_WORDS] = { [FEAT_1_EDX] = { @@ -795,7 +800,7 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = { [FEAT_7_0_EBX] = { .type = CPUID_FEATURE_WORD, .feat_names = { - "fsgsbase", "tsc-adjust", 
NULL, "bmi1", + "fsgsbase", "tsc-adjust", "sgx", "bmi1", "hle", "avx2", NULL, "smep", "bmi2", "erms", "invpcid", "rtm", NULL, NULL, "mpx", NULL, @@ -821,7 +826,7 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = { "la57", NULL, NULL, NULL, NULL, NULL, "rdpid", NULL, "bus-lock-detect", "cldemote", NULL, "movdiri", - "movdir64b", NULL, NULL, "pks", + "movdir64b", NULL, "sgxlc", "pks", }, .cpuid = { .eax = 7, @@ -1182,6 +1187,65 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = { .tcg_features = TCG_14_0_ECX_FEATURES, }, + [FEAT_SGX_12_0_EAX] = { + .type = CPUID_FEATURE_WORD, + .feat_names = { + "sgx1", "sgx2", NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + }, + .cpuid = { + .eax = 0x12, + .needs_ecx = true, .ecx = 0, + .reg = R_EAX, + }, + .tcg_features = TCG_SGX_12_0_EAX_FEATURES, + }, + + [FEAT_SGX_12_0_EBX] = { + .type = CPUID_FEATURE_WORD, + .feat_names = { + "sgx-exinfo" , NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + }, + .cpuid = { + .eax = 0x12, + .needs_ecx = true, .ecx = 0, + .reg = R_EBX, + }, + .tcg_features = TCG_SGX_12_0_EBX_FEATURES, + }, + + [FEAT_SGX_12_1_EAX] = { + .type = CPUID_FEATURE_WORD, + .feat_names = { + NULL, "sgx-debug", "sgx-mode64", NULL, + "sgx-provisionkey", "sgx-tokenkey", NULL, "sgx-kss", + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + }, + .cpuid = { + .eax = 0x12, + .needs_ecx = true, .ecx = 1, + .reg = R_EAX, + }, + .tcg_features = TCG_SGX_12_1_EAX_FEATURES, + }, }; typedef struct FeatureMask { @@ -3685,9 +3749,10 @@ static const X86CPUDefinition builtin_x86_defs[] = { }, { .version = 4, - .note = "no split lock detect", + .note = "no split lock detect, no core-capability", .props = (PropValue[]) { { "split-lock-detect", "off" }, + { "core-capability", "off" }, { /* end of list */ }, }, }, @@ -4813,7 +4878,7 @@ static void x86_cpu_list_entry(gpointer data, gpointer user_data) desc = g_strdup_printf("%s", model_id); } - qemu_printf("x86 %-20s %-58s\n", name, desc); + qemu_printf("x86 %-20s %s\n", name, desc); } /* list available CPU models and flags */ @@ -5272,6 +5337,25 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, *ecx |= CPUID_7_0_ECX_OSPKE; } *edx = env->features[FEAT_7_0_EDX]; /* Feature flags */ + + /* + * SGX cannot be emulated in software. If hardware does not + * support enabling SGX and/or SGX flexible launch control, + * then we need to update the VM's CPUID values accordingly. 
+ */ + if ((*ebx & CPUID_7_0_EBX_SGX) && + (!kvm_enabled() || + !(kvm_arch_get_supported_cpuid(cs->kvm_state, 0x7, 0, R_EBX) & + CPUID_7_0_EBX_SGX))) { + *ebx &= ~CPUID_7_0_EBX_SGX; + } + + if ((*ecx & CPUID_7_0_ECX_SGX_LC) && + (!(*ebx & CPUID_7_0_EBX_SGX) || !kvm_enabled() || + !(kvm_arch_get_supported_cpuid(cs->kvm_state, 0x7, 0, R_ECX) & + CPUID_7_0_ECX_SGX_LC))) { + *ecx &= ~CPUID_7_0_ECX_SGX_LC; + } } else if (count == 1) { *eax = env->features[FEAT_7_1_EAX]; *ebx = 0; @@ -5407,6 +5491,66 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, } break; } + case 0x12: +#ifndef CONFIG_USER_ONLY + if (!kvm_enabled() || + !(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_SGX)) { + *eax = *ebx = *ecx = *edx = 0; + break; + } + + /* + * SGX sub-leafs CPUID.0x12.{0x2..N} enumerate EPC sections. Retrieve + * the EPC properties, e.g. confidentiality and integrity, from the + * host's first EPC section, i.e. assume there is one EPC section or + * that all EPC sections have the same security properties. + */ + if (count > 1) { + uint64_t epc_addr, epc_size; + + if (sgx_epc_get_section(count - 2, &epc_addr, &epc_size)) { + *eax = *ebx = *ecx = *edx = 0; + break; + } + host_cpuid(index, 2, eax, ebx, ecx, edx); + *eax = (uint32_t)(epc_addr & 0xfffff000) | 0x1; + *ebx = (uint32_t)(epc_addr >> 32); + *ecx = (uint32_t)(epc_size & 0xfffff000) | (*ecx & 0xf); + *edx = (uint32_t)(epc_size >> 32); + break; + } + + /* + * SGX sub-leafs CPUID.0x12.{0x0,0x1} are heavily dependent on hardware + * and KVM, i.e. QEMU cannot emulate features to override what KVM + * supports. Features can be further restricted by userspace, but not + * made more permissive. + */ + *eax = kvm_arch_get_supported_cpuid(cs->kvm_state, 0x12, count, R_EAX); + *ebx = kvm_arch_get_supported_cpuid(cs->kvm_state, 0x12, count, R_EBX); + *ecx = kvm_arch_get_supported_cpuid(cs->kvm_state, 0x12, count, R_ECX); + *edx = kvm_arch_get_supported_cpuid(cs->kvm_state, 0x12, count, R_EDX); + + if (count == 0) { + *eax &= env->features[FEAT_SGX_12_0_EAX]; + *ebx &= env->features[FEAT_SGX_12_0_EBX]; + } else { + *eax &= env->features[FEAT_SGX_12_1_EAX]; + *ebx &= 0; /* ebx reserve */ + *ecx &= env->features[FEAT_XSAVE_COMP_LO]; + *edx &= env->features[FEAT_XSAVE_COMP_HI]; + + /* FP and SSE are always allowed regardless of XSAVE/XCR0. */ + *ecx |= XSTATE_FP_MASK | XSTATE_SSE_MASK; + + /* Access to PROVISIONKEY requires additional credentials. */ + if ((*eax & (1U << 4)) && + !kvm_enable_sgx_provisioning(cs->kvm_state)) { + *eax &= ~(1U << 4); + } + } +#endif + break; case 0x14: { /* Intel Processor Trace Enumeration */ *eax = 0; @@ -5621,12 +5765,13 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, *edx = 0; break; case 0x8000001F: - *eax = sev_enabled() ? 0x2 : 0; - *eax |= sev_es_enabled() ? 0x8 : 0; - *ebx = sev_get_cbit_position(); - *ebx |= sev_get_reduced_phys_bits() << 6; - *ecx = 0; - *edx = 0; + *eax = *ebx = *ecx = *edx = 0; + if (sev_enabled()) { + *eax = 0x2; + *eax |= sev_es_enabled() ? 
0x8 : 0; + *ebx = sev_get_cbit_position(); + *ebx |= sev_get_reduced_phys_bits() << 6; + } break; default: /* reserved values: zero */ @@ -5638,6 +5783,17 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, } } +static void x86_cpu_set_sgxlepubkeyhash(CPUX86State *env) +{ +#ifndef CONFIG_USER_ONLY + /* Those default values are defined in Skylake HW */ + env->msr_ia32_sgxlepubkeyhash[0] = 0xa6053e051270b7acULL; + env->msr_ia32_sgxlepubkeyhash[1] = 0x6cfbe8ba8b3b413dULL; + env->msr_ia32_sgxlepubkeyhash[2] = 0xc4916d99f2b3735dULL; + env->msr_ia32_sgxlepubkeyhash[3] = 0xd4f8c05909f9bb3bULL; +#endif +} + static void x86_cpu_reset(DeviceState *dev) { CPUState *s = CPU(dev); @@ -5770,6 +5926,13 @@ static void x86_cpu_reset(DeviceState *dev) if (kvm_enabled()) { kvm_arch_reset_vcpu(cpu); } + + x86_cpu_set_sgxlepubkeyhash(env); + + if (env->features[FEAT_SVM] & CPUID_SVM_TSCSCALE) { + env->amd_tsc_scale_msr = MSR_AMD64_TSC_RATIO_DEFAULT; + } + #endif } @@ -5999,6 +6162,11 @@ void x86_cpu_expand_features(X86CPU *cpu, Error **errp) if (sev_enabled()) { x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000001F); } + + /* SGX requires CPUID[0x12] for EPC enumeration */ + if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_SGX) { + x86_cpu_adjust_level(cpu, &env->cpuid_min_level, 0x12); + } } /* Set cpuid_*level* based on cpuid_min_*level, if not explicitly set */ @@ -6098,10 +6266,6 @@ static void x86_cpu_hyperv_realize(X86CPU *cpu) cpu->hyperv_interface_id[2] = 0; cpu->hyperv_interface_id[3] = 0; - /* Hypervisor system identity */ - cpu->hyperv_version_id[0] = 0x00001bbc; - cpu->hyperv_version_id[1] = 0x00060001; - /* Hypervisor implementation limits */ cpu->hyperv_limits[0] = 64; cpu->hyperv_limits[1] = 0; @@ -6152,6 +6316,8 @@ static void x86_cpu_realizefn(DeviceState *dev, Error **errp) & CPUID_EXT2_AMD_ALIASES); } + x86_cpu_set_sgxlepubkeyhash(env); + /* * note: the call to the framework needs to happen after feature expansion, * but before the checks/modifications to ucode_rev, mwait, phys_bits. 
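The CPUID.0x12 handling above reports one EPC section per sub-leaf starting at sub-leaf 2, with the low 4 bits of EAX acting as a type tag (1 = valid EPC section, anything else ends the list), EAX/EBX carrying the base address and ECX/EDX the size. A sketch of the inverse operation, i.e. how a consumer would walk and decode those sub-leaves; the field layout mirrors the encoding in the hunk above, the rest is illustrative:

    #include <cpuid.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned int eax, ebx, ecx, edx;

        /* EPC sections start at CPUID.0x12 sub-leaf 2 */
        for (unsigned int subleaf = 2; ; subleaf++) {
            if (!__get_cpuid_count(0x12, subleaf, &eax, &ebx, &ecx, &edx)) {
                break;
            }
            if ((eax & 0xf) != 1) {     /* not a valid EPC section */
                break;
            }

            /* Mirrors the encoding above: low 12 bits hold the type tag */
            uint64_t base = ((uint64_t)ebx << 32) | (eax & 0xfffff000);
            uint64_t size = ((uint64_t)edx << 32) | (ecx & 0xfffff000);

            printf("EPC section %u: base 0x%llx size 0x%llx\n",
                   subleaf - 2, (unsigned long long)base,
                   (unsigned long long)size);
        }
        return 0;
    }
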
@@ -6482,6 +6648,8 @@ static void x86_cpu_initfn(Object *obj) object_property_add_alias(obj, "sse4_1", obj, "sse4.1"); object_property_add_alias(obj, "sse4_2", obj, "sse4.2"); + object_property_add_alias(obj, "hv-apicv", obj, "hv-avic"); + if (xcc->model) { x86_cpu_load_model(cpu, xcc->model); } @@ -6669,9 +6837,23 @@ static Property x86_cpu_properties[] = { HYPERV_FEAT_IPI, 0), DEFINE_PROP_BIT64("hv-stimer-direct", X86CPU, hyperv_features, HYPERV_FEAT_STIMER_DIRECT, 0), + DEFINE_PROP_BIT64("hv-avic", X86CPU, hyperv_features, + HYPERV_FEAT_AVIC, 0), DEFINE_PROP_ON_OFF_AUTO("hv-no-nonarch-coresharing", X86CPU, hyperv_no_nonarch_cs, ON_OFF_AUTO_OFF), DEFINE_PROP_BOOL("hv-passthrough", X86CPU, hyperv_passthrough, false), + DEFINE_PROP_BOOL("hv-enforce-cpuid", X86CPU, hyperv_enforce_cpuid, false), + + /* WS2008R2 identify by default */ + DEFINE_PROP_UINT32("hv-version-id-build", X86CPU, hyperv_ver_id_build, + 0x3839), + DEFINE_PROP_UINT16("hv-version-id-major", X86CPU, hyperv_ver_id_major, + 0x000A), + DEFINE_PROP_UINT16("hv-version-id-minor", X86CPU, hyperv_ver_id_minor, + 0x0000), + DEFINE_PROP_UINT32("hv-version-id-spack", X86CPU, hyperv_ver_id_sp, 0), + DEFINE_PROP_UINT8("hv-version-id-sbranch", X86CPU, hyperv_ver_id_sb, 0), + DEFINE_PROP_UINT32("hv-version-id-snumber", X86CPU, hyperv_ver_id_sn, 0), DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true), DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false), @@ -6698,6 +6880,8 @@ static Property x86_cpu_properties[] = { DEFINE_PROP_BOOL("l3-cache", X86CPU, enable_l3_cache, true), DEFINE_PROP_BOOL("kvm-no-smi-migration", X86CPU, kvm_no_smi_migration, false), + DEFINE_PROP_BOOL("kvm-pv-enforce-cpuid", X86CPU, kvm_pv_enforce_cpuid, + false), DEFINE_PROP_BOOL("vmware-cpuid-freq", X86CPU, vmware_cpuid_freq, true), DEFINE_PROP_BOOL("tcg-cpuid", X86CPU, expose_tcg, true), DEFINE_PROP_BOOL("x-migrate-smi-count", X86CPU, migrate_smi_count, @@ -6839,7 +7023,6 @@ static const TypeInfo x86_cpu_type_info = { .class_init = x86_cpu_common_class_init, }; - /* "base" CPU model, used by query-cpu-model-expansion */ static void x86_cpu_base_class_init(ObjectClass *oc, void *data) { diff --git a/target/i386/cpu.h b/target/i386/cpu.h index c2954c71ea..04f2b790c9 100644 --- a/target/i386/cpu.h +++ b/target/i386/cpu.h @@ -389,9 +389,17 @@ typedef enum X86Seg { #define MSR_IA32_PKRS 0x6e1 #define FEATURE_CONTROL_LOCKED (1<<0) +#define FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX (1ULL << 1) #define FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX (1<<2) +#define FEATURE_CONTROL_SGX_LC (1ULL << 17) +#define FEATURE_CONTROL_SGX (1ULL << 18) #define FEATURE_CONTROL_LMCE (1<<20) +#define MSR_IA32_SGXLEPUBKEYHASH0 0x8c +#define MSR_IA32_SGXLEPUBKEYHASH1 0x8d +#define MSR_IA32_SGXLEPUBKEYHASH2 0x8e +#define MSR_IA32_SGXLEPUBKEYHASH3 0x8f + #define MSR_P6_PERFCTR0 0xc1 #define MSR_IA32_SMBASE 0x9e @@ -491,6 +499,9 @@ typedef enum X86Seg { #define MSR_GSBASE 0xc0000101 #define MSR_KERNELGSBASE 0xc0000102 #define MSR_TSC_AUX 0xc0000103 +#define MSR_AMD64_TSC_RATIO 0xc0000104 + +#define MSR_AMD64_TSC_RATIO_DEFAULT 0x100000000ULL #define MSR_VM_HSAVE_PA 0xc0010117 @@ -570,6 +581,9 @@ typedef enum FeatureWord { FEAT_VMX_BASIC, FEAT_VMX_VMFUNC, FEAT_14_0_ECX, + FEAT_SGX_12_0_EAX, /* CPUID[EAX=0x12,ECX=0].EAX (SGX) */ + FEAT_SGX_12_0_EBX, /* CPUID[EAX=0x12,ECX=0].EBX (SGX MISCSELECT[31:0]) */ + FEAT_SGX_12_1_EAX, /* CPUID[EAX=0x12,ECX=1].EAX (SGX ATTRIBUTES[31:0]) */ FEATURE_WORDS, } FeatureWord; @@ -718,6 +732,8 @@ typedef uint64_t FeatureWordArray[FEATURE_WORDS]; /* Support 
RDFSBASE/RDGSBASE/WRFSBASE/WRGSBASE */ #define CPUID_7_0_EBX_FSGSBASE (1U << 0) +/* Support SGX */ +#define CPUID_7_0_EBX_SGX (1U << 2) /* 1st Group of Advanced Bit Manipulation Extensions */ #define CPUID_7_0_EBX_BMI1 (1U << 3) /* Hardware Lock Elision */ @@ -805,6 +821,8 @@ typedef uint64_t FeatureWordArray[FEATURE_WORDS]; #define CPUID_7_0_ECX_MOVDIRI (1U << 27) /* Move 64 Bytes as Direct Store Instruction */ #define CPUID_7_0_ECX_MOVDIR64B (1U << 28) +/* Support SGX Launch Control */ +#define CPUID_7_0_ECX_SGX_LC (1U << 30) /* Protection Keys for Supervisor-mode Pages */ #define CPUID_7_0_ECX_PKS (1U << 31) @@ -1041,6 +1059,7 @@ typedef uint64_t FeatureWordArray[FEATURE_WORDS]; #define HYPERV_FEAT_EVMCS 12 #define HYPERV_FEAT_IPI 13 #define HYPERV_FEAT_STIMER_DIRECT 14 +#define HYPERV_FEAT_AVIC 15 #ifndef HYPERV_SPINLOCK_NEVER_NOTIFY #define HYPERV_SPINLOCK_NEVER_NOTIFY 0xFFFFFFFF @@ -1501,6 +1520,7 @@ typedef struct CPUX86State { uint64_t mcg_status; uint64_t msr_ia32_misc_enable; uint64_t msr_ia32_feature_control; + uint64_t msr_ia32_sgxlepubkeyhash[4]; uint64_t msr_fixed_ctr_ctrl; uint64_t msr_global_ctrl; @@ -1519,6 +1539,7 @@ typedef struct CPUX86State { uint32_t tsx_ctrl; uint64_t spec_ctrl; + uint64_t amd_tsc_scale_msr; uint64_t virt_ssbd; /* End of state preserved by INIT (dummy marker). */ @@ -1700,9 +1721,15 @@ struct X86CPU { OnOffAuto hyperv_no_nonarch_cs; uint32_t hyperv_vendor_id[3]; uint32_t hyperv_interface_id[4]; - uint32_t hyperv_version_id[4]; uint32_t hyperv_limits[3]; uint32_t hyperv_nested[4]; + bool hyperv_enforce_cpuid; + uint32_t hyperv_ver_id_build; + uint16_t hyperv_ver_id_major; + uint16_t hyperv_ver_id_minor; + uint32_t hyperv_ver_id_sp; + uint8_t hyperv_ver_id_sb; + uint32_t hyperv_ver_id_sn; bool check_cpuid; bool enforce_cpuid; @@ -1786,6 +1813,9 @@ struct X86CPU { /* Stop SMI delivery for migration compatibility with old machines */ bool kvm_no_smi_migration; + /* Forcefully disable KVM PV features not exposed in guest CPUIDs */ + bool kvm_pv_enforce_cpuid; + /* Number of physical address bits supported */ uint32_t phys_bits; diff --git a/target/i386/hax/hax-mem.c b/target/i386/hax/hax-mem.c index 8d44edbffd..a226d174d8 100644 --- a/target/i386/hax/hax-mem.c +++ b/target/i386/hax/hax-mem.c @@ -285,6 +285,7 @@ static void hax_log_sync(MemoryListener *listener, } static MemoryListener hax_memory_listener = { + .name = "hax", .begin = hax_transaction_begin, .commit = hax_transaction_commit, .region_add = hax_region_add, diff --git a/target/i386/helper.h b/target/i386/helper.h index 574ff75615..ac3b4d1ee3 100644 --- a/target/i386/helper.h +++ b/target/i386/helper.h @@ -56,7 +56,6 @@ DEF_HELPER_2(syscall, void, env, int) DEF_HELPER_2(sysret, void, env, int) #endif DEF_HELPER_FLAGS_2(pause, TCG_CALL_NO_WG, noreturn, env, int) -DEF_HELPER_FLAGS_1(debug, TCG_CALL_NO_WG, noreturn, env) DEF_HELPER_1(reset_rf, void, env) DEF_HELPER_FLAGS_3(raise_interrupt, TCG_CALL_NO_WG, noreturn, env, int, int) DEF_HELPER_FLAGS_2(raise_exception, TCG_CALL_NO_WG, noreturn, env, int) diff --git a/target/i386/kvm/hyperv-proto.h b/target/i386/kvm/hyperv-proto.h index 5fbb385cc1..89f81afda7 100644 --- a/target/i386/kvm/hyperv-proto.h +++ b/target/i386/kvm/hyperv-proto.h @@ -66,6 +66,7 @@ #define HV_APIC_ACCESS_RECOMMENDED (1u << 3) #define HV_SYSTEM_RESET_RECOMMENDED (1u << 4) #define HV_RELAXED_TIMING_RECOMMENDED (1u << 5) +#define HV_DEPRECATING_AEOI_RECOMMENDED (1u << 9) #define HV_CLUSTER_IPI_RECOMMENDED (1u << 10) #define HV_EX_PROCESSOR_MASKS_RECOMMENDED (1u << 11) #define 
HV_ENLIGHTENED_VMCS_RECOMMENDED (1u << 14) diff --git a/target/i386/kvm/kvm.c b/target/i386/kvm/kvm.c index 500d2e0e68..5a698bde19 100644 --- a/target/i386/kvm/kvm.c +++ b/target/i386/kvm/kvm.c @@ -28,7 +28,7 @@ #include "sysemu/kvm_int.h" #include "sysemu/runstate.h" #include "kvm_i386.h" -#include "sev_i386.h" +#include "sev.h" #include "hyperv.h" #include "hyperv-proto.h" @@ -44,7 +44,6 @@ #include "hw/i386/intel_iommu.h" #include "hw/i386/x86-iommu.h" #include "hw/i386/e820_memory_layout.h" -#include "sysemu/sev.h" #include "hw/pci/pci.h" #include "hw/pci/msi.h" @@ -106,6 +105,7 @@ static bool has_msr_hv_reenlightenment; static bool has_msr_xss; static bool has_msr_umwait; static bool has_msr_spec_ctrl; +static bool has_tsc_scale_msr; static bool has_msr_tsx_ctrl; static bool has_msr_virt_ssbd; static bool has_msr_smi_count; @@ -821,9 +821,7 @@ static struct { .desc = "virtual APIC (hv-vapic)", .flags = { {.func = HV_CPUID_FEATURES, .reg = R_EAX, - .bits = HV_APIC_ACCESS_AVAILABLE}, - {.func = HV_CPUID_ENLIGHTMENT_INFO, .reg = R_EAX, - .bits = HV_APIC_ACCESS_RECOMMENDED} + .bits = HV_APIC_ACCESS_AVAILABLE} } }, [HYPERV_FEAT_TIME] = { @@ -926,6 +924,13 @@ static struct { }, .dependencies = BIT(HYPERV_FEAT_STIMER) }, + [HYPERV_FEAT_AVIC] = { + .desc = "AVIC/APICv support (hv-avic/hv-apicv)", + .flags = { + {.func = HV_CPUID_ENLIGHTMENT_INFO, .reg = R_EAX, + .bits = HV_DEPRECATING_AEOI_RECOMMENDED} + } + }, }; static struct kvm_cpuid2 *try_get_hv_cpuid(CPUState *cs, int max, @@ -1253,14 +1258,18 @@ bool kvm_hyperv_expand_features(X86CPU *cpu, Error **errp) cpu->hyperv_interface_id[3] = hv_cpuid_get_host(cs, HV_CPUID_INTERFACE, R_EDX); - cpu->hyperv_version_id[0] = + cpu->hyperv_ver_id_build = hv_cpuid_get_host(cs, HV_CPUID_VERSION, R_EAX); - cpu->hyperv_version_id[1] = - hv_cpuid_get_host(cs, HV_CPUID_VERSION, R_EBX); - cpu->hyperv_version_id[2] = + cpu->hyperv_ver_id_major = + hv_cpuid_get_host(cs, HV_CPUID_VERSION, R_EBX) >> 16; + cpu->hyperv_ver_id_minor = + hv_cpuid_get_host(cs, HV_CPUID_VERSION, R_EBX) & 0xffff; + cpu->hyperv_ver_id_sp = hv_cpuid_get_host(cs, HV_CPUID_VERSION, R_ECX); - cpu->hyperv_version_id[3] = - hv_cpuid_get_host(cs, HV_CPUID_VERSION, R_EDX); + cpu->hyperv_ver_id_sb = + hv_cpuid_get_host(cs, HV_CPUID_VERSION, R_EDX) >> 24; + cpu->hyperv_ver_id_sn = + hv_cpuid_get_host(cs, HV_CPUID_VERSION, R_EDX) & 0xffffff; cpu->hv_max_vps = hv_cpuid_get_host(cs, HV_CPUID_IMPLEMENT_LIMITS, R_EAX); @@ -1346,10 +1355,12 @@ static int hyperv_fill_cpuids(CPUState *cs, c = &cpuid_ent[cpuid_i++]; c->function = HV_CPUID_VERSION; - c->eax = cpu->hyperv_version_id[0]; - c->ebx = cpu->hyperv_version_id[1]; - c->ecx = cpu->hyperv_version_id[2]; - c->edx = cpu->hyperv_version_id[3]; + c->eax = cpu->hyperv_ver_id_build; + c->ebx = (uint32_t)cpu->hyperv_ver_id_major << 16 | + cpu->hyperv_ver_id_minor; + c->ecx = cpu->hyperv_ver_id_sp; + c->edx = (uint32_t)cpu->hyperv_ver_id_sb << 24 | + (cpu->hyperv_ver_id_sn & 0xffffff); c = &cpuid_ent[cpuid_i++]; c->function = HV_CPUID_FEATURES; @@ -1366,6 +1377,7 @@ static int hyperv_fill_cpuids(CPUState *cs, c->ebx |= HV_POST_MESSAGES | HV_SIGNAL_EVENTS; } + /* Not exposed by KVM but needed to make CPU hotplug in Windows work */ c->edx |= HV_CPU_DYNAMIC_PARTITIONING_AVAILABLE; @@ -1374,6 +1386,11 @@ static int hyperv_fill_cpuids(CPUState *cs, c->eax = hv_build_cpuid_leaf(cs, HV_CPUID_ENLIGHTMENT_INFO, R_EAX); c->ebx = cpu->hyperv_spinlock_attempts; + if (hyperv_feat_enabled(cpu, HYPERV_FEAT_VAPIC) && + !hyperv_feat_enabled(cpu, HYPERV_FEAT_AVIC)) { + 
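The rework above splits HV_CPUID_VERSION into individually settable pieces (build number, major/minor version, service pack, service branch and service number) and packs them back into the leaf when filling the vCPU's CPUID entries. A tiny sketch of that packing, using the same shifts and masks as the code above; the struct and function names are illustrative:

    #include <stdint.h>
    #include <stdio.h>

    struct hv_version_id {
        uint32_t build;
        uint16_t major, minor;
        uint32_t service_pack;
        uint8_t  service_branch;
        uint32_t service_number;    /* only the low 24 bits are used */
    };

    /* Pack into the HV_CPUID_VERSION register layout used above */
    static void hv_version_pack(const struct hv_version_id *v,
                                uint32_t *eax, uint32_t *ebx,
                                uint32_t *ecx, uint32_t *edx)
    {
        *eax = v->build;
        *ebx = (uint32_t)v->major << 16 | v->minor;
        *ecx = v->service_pack;
        *edx = (uint32_t)v->service_branch << 24 |
               (v->service_number & 0xffffff);
    }

    int main(void)
    {
        /* Defaults from the new hv-version-id-* properties above */
        struct hv_version_id v = {
            .build = 0x3839, .major = 0x000A, .minor = 0x0000,
        };
        uint32_t eax, ebx, ecx, edx;

        hv_version_pack(&v, &eax, &ebx, &ecx, &edx);
        printf("eax=%#x ebx=%#x ecx=%#x edx=%#x\n", eax, ebx, ecx, edx);
        return 0;
    }
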
c->eax |= HV_APIC_ACCESS_RECOMMENDED; + } + if (cpu->hyperv_no_nonarch_cs == ON_OFF_AUTO_ON) { c->eax |= HV_NO_NONARCH_CORESHARING; } else if (cpu->hyperv_no_nonarch_cs == ON_OFF_AUTO_AUTO) { @@ -1531,6 +1548,15 @@ static int hyperv_init_vcpu(X86CPU *cpu) cpu->hyperv_nested[0] = evmcs_version; } + if (cpu->hyperv_enforce_cpuid) { + ret = kvm_vcpu_enable_cap(cs, KVM_CAP_HYPERV_ENFORCE_CPUID, 0, 1); + if (ret < 0) { + error_report("failed to enable KVM_CAP_HYPERV_ENFORCE_CPUID: %s", + strerror(-ret)); + return ret; + } + } + return 0; } @@ -1629,6 +1655,16 @@ int kvm_arch_init_vcpu(CPUState *cs) cpu_x86_cpuid(env, 0, 0, &limit, &unused, &unused, &unused); + if (cpu->kvm_pv_enforce_cpuid) { + r = kvm_vcpu_enable_cap(cs, KVM_CAP_ENFORCE_PV_FEATURE_CPUID, 0, 1); + if (r < 0) { + fprintf(stderr, + "failed to enable KVM_CAP_ENFORCE_PV_FEATURE_CPUID: %s", + strerror(-r)); + abort(); + } + } + for (i = 0; i <= limit; i++) { if (cpuid_i == KVM_MAX_CPUID_ENTRIES) { fprintf(stderr, "unsupported level value: 0x%x\n", limit); @@ -1703,6 +1739,25 @@ int kvm_arch_init_vcpu(CPUState *cs) } break; case 0x7: + case 0x12: + for (j = 0; ; j++) { + c->function = i; + c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX; + c->index = j; + cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx); + + if (j > 1 && (c->eax & 0xf) != 1) { + break; + } + + if (cpuid_i == KVM_MAX_CPUID_ENTRIES) { + fprintf(stderr, "cpuid_data is full, no space for " + "cpuid(eax:0x12,ecx:0x%x)\n", j); + abort(); + } + c = &cpuid_data.entries[cpuid_i++]; + } + break; case 0x14: { uint32_t times; @@ -1877,6 +1932,11 @@ int kvm_arch_init_vcpu(CPUState *cs) !!(c->ecx & CPUID_EXT_SMX); } + c = cpuid_find_entry(&cpuid_data.cpuid, 7, 0); + if (c && (c->ebx & CPUID_7_0_EBX_SGX)) { + has_msr_feature_control = true; + } + if (env->mcg_cap & MCG_LMCE_P) { has_msr_mcg_ext_ctl = has_msr_feature_control = true; } @@ -2157,6 +2217,9 @@ static int kvm_get_supported_msrs(KVMState *s) case MSR_IA32_SPEC_CTRL: has_msr_spec_ctrl = true; break; + case MSR_AMD64_TSC_RATIO: + has_tsc_scale_msr = true; + break; case MSR_IA32_TSX_CTRL: has_msr_tsx_ctrl = true; break; @@ -2224,7 +2287,7 @@ static void register_smram_listener(Notifier *n, void *unused) address_space_init(&smram_address_space, &smram_as_root, "KVM-SMRAM"); kvm_memory_listener_register(kvm_state, &smram_listener, - &smram_address_space, 1); + &smram_address_space, 1, "kvm-smram"); } int kvm_arch_init(MachineState *ms, KVMState *s) @@ -2913,6 +2976,10 @@ static int kvm_put_msrs(X86CPU *cpu, int level) if (has_msr_spec_ctrl) { kvm_msr_entry_add(cpu, MSR_IA32_SPEC_CTRL, env->spec_ctrl); } + if (has_tsc_scale_msr) { + kvm_msr_entry_add(cpu, MSR_AMD64_TSC_RATIO, env->amd_tsc_scale_msr); + } + if (has_msr_tsx_ctrl) { kvm_msr_entry_add(cpu, MSR_IA32_TSX_CTRL, env->tsx_ctrl); } @@ -3107,6 +3174,17 @@ static int kvm_put_msrs(X86CPU *cpu, int level) } } + if (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_SGX_LC) { + kvm_msr_entry_add(cpu, MSR_IA32_SGXLEPUBKEYHASH0, + env->msr_ia32_sgxlepubkeyhash[0]); + kvm_msr_entry_add(cpu, MSR_IA32_SGXLEPUBKEYHASH1, + env->msr_ia32_sgxlepubkeyhash[1]); + kvm_msr_entry_add(cpu, MSR_IA32_SGXLEPUBKEYHASH2, + env->msr_ia32_sgxlepubkeyhash[2]); + kvm_msr_entry_add(cpu, MSR_IA32_SGXLEPUBKEYHASH3, + env->msr_ia32_sgxlepubkeyhash[3]); + } + /* Note: MSR_IA32_FEATURE_CONTROL is written separately, see * kvm_put_msr_feature_control. 
*/ } @@ -3307,6 +3385,10 @@ static int kvm_get_msrs(X86CPU *cpu) if (has_msr_spec_ctrl) { kvm_msr_entry_add(cpu, MSR_IA32_SPEC_CTRL, 0); } + if (has_tsc_scale_msr) { + kvm_msr_entry_add(cpu, MSR_AMD64_TSC_RATIO, 0); + } + if (has_msr_tsx_ctrl) { kvm_msr_entry_add(cpu, MSR_IA32_TSX_CTRL, 0); } @@ -3446,6 +3528,13 @@ static int kvm_get_msrs(X86CPU *cpu) } } + if (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_SGX_LC) { + kvm_msr_entry_add(cpu, MSR_IA32_SGXLEPUBKEYHASH0, 0); + kvm_msr_entry_add(cpu, MSR_IA32_SGXLEPUBKEYHASH1, 0); + kvm_msr_entry_add(cpu, MSR_IA32_SGXLEPUBKEYHASH2, 0); + kvm_msr_entry_add(cpu, MSR_IA32_SGXLEPUBKEYHASH3, 0); + } + ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MSRS, cpu->kvm_msr_buf); if (ret < 0) { return ret; @@ -3711,6 +3800,9 @@ static int kvm_get_msrs(X86CPU *cpu) case MSR_IA32_SPEC_CTRL: env->spec_ctrl = msrs[i].data; break; + case MSR_AMD64_TSC_RATIO: + env->amd_tsc_scale_msr = msrs[i].data; + break; case MSR_IA32_TSX_CTRL: env->tsx_ctrl = msrs[i].data; break; @@ -3735,6 +3827,10 @@ static int kvm_get_msrs(X86CPU *cpu) case MSR_IA32_RTIT_ADDR0_A ... MSR_IA32_RTIT_ADDR3_B: env->msr_rtit_addrs[index - MSR_IA32_RTIT_ADDR0_A] = msrs[i].data; break; + case MSR_IA32_SGXLEPUBKEYHASH0 ... MSR_IA32_SGXLEPUBKEYHASH3: + env->msr_ia32_sgxlepubkeyhash[index - MSR_IA32_SGXLEPUBKEYHASH0] = + msrs[i].data; + break; } } @@ -4617,6 +4713,35 @@ void kvm_arch_update_guest_debug(CPUState *cpu, struct kvm_guest_debug *dbg) } } +static bool has_sgx_provisioning; + +static bool __kvm_enable_sgx_provisioning(KVMState *s) +{ + int fd, ret; + + if (!kvm_vm_check_extension(s, KVM_CAP_SGX_ATTRIBUTE)) { + return false; + } + + fd = qemu_open_old("/dev/sgx_provision", O_RDONLY); + if (fd < 0) { + return false; + } + + ret = kvm_vm_enable_cap(s, KVM_CAP_SGX_ATTRIBUTE, 0, fd); + if (ret) { + error_report("Could not enable SGX PROVISIONKEY: %s", strerror(-ret)); + exit(1); + } + close(fd); + return true; +} + +bool kvm_enable_sgx_provisioning(KVMState *s) +{ + return MEMORIZE(__kvm_enable_sgx_provisioning(s), has_sgx_provisioning); +} + static bool host_supports_vmx(void) { uint32_t ecx, unused; diff --git a/target/i386/kvm/kvm_i386.h b/target/i386/kvm/kvm_i386.h index 54667b35f0..a978509d50 100644 --- a/target/i386/kvm/kvm_i386.h +++ b/target/i386/kvm/kvm_i386.h @@ -51,4 +51,6 @@ bool kvm_hyperv_expand_features(X86CPU *cpu, Error **errp); uint64_t kvm_swizzle_msi_ext_dest_id(uint64_t address); +bool kvm_enable_sgx_provisioning(KVMState *s); + #endif diff --git a/target/i386/kvm/meson.build b/target/i386/kvm/meson.build index 0a533411ca..736df8b72e 100644 --- a/target/i386/kvm/meson.build +++ b/target/i386/kvm/meson.build @@ -1,8 +1,14 @@ i386_ss.add(when: 'CONFIG_KVM', if_false: files('kvm-stub.c')) -i386_softmmu_ss.add(when: 'CONFIG_KVM', if_true: files( +i386_softmmu_kvm_ss = ss.source_set() + +i386_softmmu_kvm_ss.add(files( 'kvm.c', 'kvm-cpu.c', )) +i386_softmmu_kvm_ss.add(when: 'CONFIG_SEV', if_false: files('sev-stub.c')) + i386_softmmu_ss.add(when: 'CONFIG_HYPERV', if_true: files('hyperv.c'), if_false: files('hyperv-stub.c')) + +i386_softmmu_ss.add_all(when: 'CONFIG_KVM', if_true: i386_softmmu_kvm_ss) diff --git a/accel/kvm/sev-stub.c b/target/i386/kvm/sev-stub.c similarity index 94% rename from accel/kvm/sev-stub.c rename to target/i386/kvm/sev-stub.c index 9587d1b2a3..6080c007a2 100644 --- a/accel/kvm/sev-stub.c +++ b/target/i386/kvm/sev-stub.c @@ -13,7 +13,7 @@ #include "qemu/osdep.h" #include "qemu-common.h" -#include "sysemu/sev.h" +#include "sev.h" int 
sev_kvm_init(ConfidentialGuestSupport *cgs, Error **errp) { diff --git a/target/i386/machine.c b/target/i386/machine.c index b0943118d1..83c2b91529 100644 --- a/target/i386/machine.c +++ b/target/i386/machine.c @@ -1280,6 +1280,27 @@ static const VMStateDescription vmstate_spec_ctrl = { } }; + +static bool amd_tsc_scale_msr_needed(void *opaque) +{ + X86CPU *cpu = opaque; + CPUX86State *env = &cpu->env; + + return (env->features[FEAT_SVM] & CPUID_SVM_TSCSCALE); +} + +static const VMStateDescription amd_tsc_scale_msr_ctrl = { + .name = "cpu/amd_tsc_scale_msr", + .version_id = 1, + .minimum_version_id = 1, + .needed = amd_tsc_scale_msr_needed, + .fields = (VMStateField[]){ + VMSTATE_UINT64(env.amd_tsc_scale_msr, X86CPU), + VMSTATE_END_OF_LIST() + } +}; + + static bool intel_pt_enable_needed(void *opaque) { X86CPU *cpu = opaque; @@ -1415,6 +1436,25 @@ static const VMStateDescription vmstate_msr_tsx_ctrl = { } }; +static bool intel_sgx_msrs_needed(void *opaque) +{ + X86CPU *cpu = opaque; + CPUX86State *env = &cpu->env; + + return !!(env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_SGX_LC); +} + +static const VMStateDescription vmstate_msr_intel_sgx = { + .name = "cpu/intel_sgx", + .version_id = 1, + .minimum_version_id = 1, + .needed = intel_sgx_msrs_needed, + .fields = (VMStateField[]) { + VMSTATE_UINT64_ARRAY(env.msr_ia32_sgxlepubkeyhash, X86CPU, 4), + VMSTATE_END_OF_LIST() + } +}; + const VMStateDescription vmstate_x86_cpu = { .name = "cpu", .version_id = 12, @@ -1539,6 +1579,7 @@ const VMStateDescription vmstate_x86_cpu = { &vmstate_pkru, &vmstate_pkrs, &vmstate_spec_ctrl, + &amd_tsc_scale_msr_ctrl, &vmstate_mcg_ext_ctl, &vmstate_msr_intel_pt, &vmstate_msr_virt_ssbd, @@ -1551,6 +1592,7 @@ const VMStateDescription vmstate_x86_cpu = { &vmstate_nested_state, #endif &vmstate_msr_tsx_ctrl, + &vmstate_msr_intel_sgx, NULL } }; diff --git a/target/i386/meson.build b/target/i386/meson.build index dac19ec00d..ae38dc9563 100644 --- a/target/i386/meson.build +++ b/target/i386/meson.build @@ -6,7 +6,7 @@ i386_ss.add(files( 'xsave_helper.c', 'cpu-dump.c', )) -i386_ss.add(when: 'CONFIG_SEV', if_true: files('host-cpu.c', 'sev.c'), if_false: files('sev-stub.c')) +i386_ss.add(when: 'CONFIG_SEV', if_true: files('host-cpu.c')) # x86 cpu type i386_ss.add(when: 'CONFIG_KVM', if_true: files('host-cpu.c')) @@ -20,6 +20,8 @@ i386_softmmu_ss.add(files( 'monitor.c', 'cpu-sysemu.c', )) +i386_softmmu_ss.add(when: 'CONFIG_SEV', if_true: files('sev.c'), if_false: files('sev-sysemu-stub.c')) + i386_user_ss = ss.source_set() subdir('kvm') diff --git a/target/i386/monitor.c b/target/i386/monitor.c index 119211f0b0..8e4b4d600c 100644 --- a/target/i386/monitor.c +++ b/target/i386/monitor.c @@ -28,10 +28,9 @@ #include "monitor/hmp-target.h" #include "monitor/hmp.h" #include "qapi/qmp/qdict.h" +#include "qapi/qmp/qerror.h" #include "sysemu/kvm.h" -#include "sysemu/sev.h" #include "qapi/error.h" -#include "sev_i386.h" #include "qapi/qapi-commands-misc-target.h" #include "qapi/qapi-commands-misc.h" #include "hw/i386/pc.h" @@ -668,98 +667,3 @@ void hmp_info_local_apic(Monitor *mon, const QDict *qdict) } x86_cpu_dump_local_apic_state(cs, CPU_DUMP_FPU); } - -void hmp_info_io_apic(Monitor *mon, const QDict *qdict) -{ - monitor_printf(mon, "This command is obsolete and will be " - "removed soon. 
Please use 'info pic' instead.\n"); -} - -SevInfo *qmp_query_sev(Error **errp) -{ - SevInfo *info; - - info = sev_get_info(); - if (!info) { - error_setg(errp, "SEV feature is not available"); - return NULL; - } - - return info; -} - -void hmp_info_sev(Monitor *mon, const QDict *qdict) -{ - SevInfo *info = sev_get_info(); - - if (info && info->enabled) { - monitor_printf(mon, "handle: %d\n", info->handle); - monitor_printf(mon, "state: %s\n", SevState_str(info->state)); - monitor_printf(mon, "build: %d\n", info->build_id); - monitor_printf(mon, "api version: %d.%d\n", - info->api_major, info->api_minor); - monitor_printf(mon, "debug: %s\n", - info->policy & SEV_POLICY_NODBG ? "off" : "on"); - monitor_printf(mon, "key-sharing: %s\n", - info->policy & SEV_POLICY_NOKS ? "off" : "on"); - } else { - monitor_printf(mon, "SEV is not enabled\n"); - } - - qapi_free_SevInfo(info); -} - -SevLaunchMeasureInfo *qmp_query_sev_launch_measure(Error **errp) -{ - char *data; - SevLaunchMeasureInfo *info; - - data = sev_get_launch_measurement(); - if (!data) { - error_setg(errp, "Measurement is not available"); - return NULL; - } - - info = g_malloc0(sizeof(*info)); - info->data = data; - - return info; -} - -SevCapability *qmp_query_sev_capabilities(Error **errp) -{ - return sev_get_capabilities(errp); -} - -#define SEV_SECRET_GUID "4c2eb361-7d9b-4cc3-8081-127c90d3d294" -struct sev_secret_area { - uint32_t base; - uint32_t size; -}; - -void qmp_sev_inject_launch_secret(const char *packet_hdr, - const char *secret, - bool has_gpa, uint64_t gpa, - Error **errp) -{ - if (!has_gpa) { - uint8_t *data; - struct sev_secret_area *area; - - if (!pc_system_ovmf_table_find(SEV_SECRET_GUID, &data, NULL)) { - error_setg(errp, "SEV: no secret area found in OVMF," - " gpa must be specified."); - return; - } - area = (struct sev_secret_area *)data; - gpa = area->base; - } - - sev_inject_launch_secret(packet_hdr, secret, gpa, errp); -} - -SevAttestationReport * -qmp_query_sev_attestation_report(const char *mnonce, Error **errp) -{ - return sev_get_attestation_report(mnonce, errp); -} diff --git a/target/i386/nvmm/nvmm-all.c b/target/i386/nvmm/nvmm-all.c index a488b00e90..14c996f968 100644 --- a/target/i386/nvmm/nvmm-all.c +++ b/target/i386/nvmm/nvmm-all.c @@ -1123,6 +1123,7 @@ nvmm_log_sync(MemoryListener *listener, MemoryRegionSection *section) } static MemoryListener nvmm_memory_listener = { + .name = "nvmm", .begin = nvmm_transaction_begin, .commit = nvmm_transaction_commit, .region_add = nvmm_region_add, diff --git a/target/i386/sev-stub.c b/target/i386/sev-stub.c deleted file mode 100644 index 0227cb5177..0000000000 --- a/target/i386/sev-stub.c +++ /dev/null @@ -1,83 +0,0 @@ -/* - * QEMU SEV stub - * - * Copyright Advanced Micro Devices 2018 - * - * Authors: - * Brijesh Singh - * - * This work is licensed under the terms of the GNU GPL, version 2 or later. - * See the COPYING file in the top-level directory. 
- * - */ - -#include "qemu/osdep.h" -#include "sev_i386.h" - -SevInfo *sev_get_info(void) -{ - return NULL; -} - -bool sev_enabled(void) -{ - return false; -} - -uint64_t sev_get_me_mask(void) -{ - return ~0; -} - -uint32_t sev_get_cbit_position(void) -{ - return 0; -} - -uint32_t sev_get_reduced_phys_bits(void) -{ - return 0; -} - -char *sev_get_launch_measurement(void) -{ - return NULL; -} - -SevCapability *sev_get_capabilities(Error **errp) -{ - error_setg(errp, "SEV is not available in this QEMU"); - return NULL; -} - -int sev_inject_launch_secret(const char *hdr, const char *secret, - uint64_t gpa, Error **errp) -{ - return 1; -} - -int sev_encrypt_flash(uint8_t *ptr, uint64_t len, Error **errp) -{ - return 0; -} - -bool sev_es_enabled(void) -{ - return false; -} - -void sev_es_set_reset_vector(CPUState *cpu) -{ -} - -int sev_es_save_reset_vector(void *flash_ptr, uint64_t flash_size) -{ - abort(); -} - -SevAttestationReport * -sev_get_attestation_report(const char *mnonce, Error **errp) -{ - error_setg(errp, "SEV is not available in this QEMU"); - return NULL; -} diff --git a/target/i386/sev-sysemu-stub.c b/target/i386/sev-sysemu-stub.c new file mode 100644 index 0000000000..7a29295d1e --- /dev/null +++ b/target/i386/sev-sysemu-stub.c @@ -0,0 +1,70 @@ +/* + * QEMU SEV system stub + * + * Copyright Advanced Micro Devices 2018 + * + * Authors: + * Brijesh Singh + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + */ + +#include "qemu/osdep.h" +#include "monitor/monitor.h" +#include "monitor/hmp-target.h" +#include "qapi/qapi-commands-misc-target.h" +#include "qapi/qmp/qerror.h" +#include "qapi/error.h" +#include "sev.h" + +SevInfo *qmp_query_sev(Error **errp) +{ + error_setg(errp, "SEV is not available in this QEMU"); + return NULL; +} + +SevLaunchMeasureInfo *qmp_query_sev_launch_measure(Error **errp) +{ + error_setg(errp, "SEV is not available in this QEMU"); + return NULL; +} + +SevCapability *qmp_query_sev_capabilities(Error **errp) +{ + error_setg(errp, "SEV is not available in this QEMU"); + return NULL; +} + +void qmp_sev_inject_launch_secret(const char *packet_header, const char *secret, + bool has_gpa, uint64_t gpa, Error **errp) +{ + error_setg(errp, "SEV is not available in this QEMU"); +} + +int sev_encrypt_flash(uint8_t *ptr, uint64_t len, Error **errp) +{ + g_assert_not_reached(); +} + +void sev_es_set_reset_vector(CPUState *cpu) +{ +} + +int sev_es_save_reset_vector(void *flash_ptr, uint64_t flash_size) +{ + g_assert_not_reached(); +} + +SevAttestationReport *qmp_query_sev_attestation_report(const char *mnonce, + Error **errp) +{ + error_setg(errp, "SEV is not available in this QEMU"); + return NULL; +} + +void hmp_info_sev(Monitor *mon, const QDict *qdict) +{ + monitor_printf(mon, "SEV is not available in this QEMU\n"); +} diff --git a/target/i386/sev.c b/target/i386/sev.c index 0b2c8f594a..eede07f11d 100644 --- a/target/i386/sev.c +++ b/target/i386/sev.c @@ -23,14 +23,18 @@ #include "qemu/base64.h" #include "qemu/module.h" #include "qemu/uuid.h" +#include "crypto/hash.h" #include "sysemu/kvm.h" -#include "sev_i386.h" +#include "sev.h" #include "sysemu/sysemu.h" #include "sysemu/runstate.h" #include "trace.h" #include "migration/blocker.h" #include "qom/object.h" #include "monitor/monitor.h" +#include "monitor/hmp-target.h" +#include "qapi/qapi-commands-misc-target.h" +#include "qapi/qmp/qerror.h" #include "exec/confidential-guest-support.h" #include "hw/i386/pc.h" @@ -64,7 +68,6 @@ 
struct SevGuestState { uint8_t api_major; uint8_t api_minor; uint8_t build_id; - uint64_t me_mask; int sev_fd; SevState state; gchar *measurement; @@ -83,6 +86,32 @@ typedef struct __attribute__((__packed__)) SevInfoBlock { uint32_t reset_addr; } SevInfoBlock; +#define SEV_HASH_TABLE_RV_GUID "7255371f-3a3b-4b04-927b-1da6efa8d454" +typedef struct QEMU_PACKED SevHashTableDescriptor { + /* SEV hash table area guest address */ + uint32_t base; + /* SEV hash table area size (in bytes) */ + uint32_t size; +} SevHashTableDescriptor; + +/* hard code sha256 digest size */ +#define HASH_SIZE 32 + +typedef struct QEMU_PACKED SevHashTableEntry { + QemuUUID guid; + uint16_t len; + uint8_t hash[HASH_SIZE]; +} SevHashTableEntry; + +typedef struct QEMU_PACKED SevHashTable { + QemuUUID guid; + uint16_t len; + SevHashTableEntry cmdline; + SevHashTableEntry initrd; + SevHashTableEntry kernel; + uint8_t padding[]; +} SevHashTable; + static SevGuestState *sev_guest; static Error *sev_mig_blocker; @@ -362,12 +391,6 @@ sev_es_enabled(void) return sev_enabled() && (sev_guest->policy & SEV_POLICY_ES); } -uint64_t -sev_get_me_mask(void) -{ - return sev_guest ? sev_guest->me_mask : ~0; -} - uint32_t sev_get_cbit_position(void) { @@ -380,8 +403,7 @@ sev_get_reduced_phys_bits(void) return sev_guest ? sev_guest->reduced_phys_bits : 0; } -SevInfo * -sev_get_info(void) +static SevInfo *sev_get_info(void) { SevInfo *info; @@ -400,6 +422,40 @@ sev_get_info(void) return info; } +SevInfo *qmp_query_sev(Error **errp) +{ + SevInfo *info; + + info = sev_get_info(); + if (!info) { + error_setg(errp, "SEV feature is not available"); + return NULL; + } + + return info; +} + +void hmp_info_sev(Monitor *mon, const QDict *qdict) +{ + SevInfo *info = sev_get_info(); + + if (info && info->enabled) { + monitor_printf(mon, "handle: %d\n", info->handle); + monitor_printf(mon, "state: %s\n", SevState_str(info->state)); + monitor_printf(mon, "build: %d\n", info->build_id); + monitor_printf(mon, "api version: %d.%d\n", + info->api_major, info->api_minor); + monitor_printf(mon, "debug: %s\n", + info->policy & SEV_POLICY_NODBG ? "off" : "on"); + monitor_printf(mon, "key-sharing: %s\n", + info->policy & SEV_POLICY_NOKS ? 
"off" : "on"); + } else { + monitor_printf(mon, "SEV is not enabled\n"); + } + + qapi_free_SevInfo(info); +} + static int sev_get_pdh_info(int fd, guchar **pdh, size_t *pdh_len, guchar **cert_chain, size_t *cert_chain_len, Error **errp) @@ -413,7 +469,8 @@ sev_get_pdh_info(int fd, guchar **pdh, size_t *pdh_len, guchar **cert_chain, r = sev_platform_ioctl(fd, SEV_PDH_CERT_EXPORT, &export, &err); if (r < 0) { if (err != SEV_RET_INVALID_LEN) { - error_setg(errp, "failed to export PDH cert ret=%d fw_err=%d (%s)", + error_setg(errp, "SEV: Failed to export PDH cert" + " ret=%d fw_err=%d (%s)", r, err, fw_error_to_str(err)); return 1; } @@ -426,7 +483,7 @@ sev_get_pdh_info(int fd, guchar **pdh, size_t *pdh_len, guchar **cert_chain, r = sev_platform_ioctl(fd, SEV_PDH_CERT_EXPORT, &export, &err); if (r < 0) { - error_setg(errp, "failed to export PDH cert ret=%d fw_err=%d (%s)", + error_setg(errp, "SEV: Failed to export PDH cert ret=%d fw_err=%d (%s)", r, err, fw_error_to_str(err)); goto e_free; } @@ -443,8 +500,7 @@ e_free: return 1; } -SevCapability * -sev_get_capabilities(Error **errp) +static SevCapability *sev_get_capabilities(Error **errp) { SevCapability *cap = NULL; guchar *pdh_data = NULL; @@ -464,7 +520,7 @@ sev_get_capabilities(Error **errp) fd = open(DEFAULT_SEV_DEVICE, O_RDWR); if (fd < 0) { - error_setg_errno(errp, errno, "Failed to open %s", + error_setg_errno(errp, errno, "SEV: Failed to open %s", DEFAULT_SEV_DEVICE); return NULL; } @@ -494,14 +550,19 @@ out: return cap; } -SevAttestationReport * -sev_get_attestation_report(const char *mnonce, Error **errp) +SevCapability *qmp_query_sev_capabilities(Error **errp) +{ + return sev_get_capabilities(errp); +} + +static SevAttestationReport *sev_get_attestation_report(const char *mnonce, + Error **errp) { struct kvm_sev_attestation_report input = {}; SevAttestationReport *report = NULL; SevGuestState *sev = sev_guest; - guchar *data; - guchar *buf; + g_autofree guchar *data = NULL; + g_autofree guchar *buf = NULL; gsize len; int err = 0, ret; @@ -521,7 +582,6 @@ sev_get_attestation_report(const char *mnonce, Error **errp) if (len != sizeof(input.mnonce)) { error_setg(errp, "SEV: mnonce must be %zu bytes (got %" G_GSIZE_FORMAT ")", sizeof(input.mnonce), len); - g_free(buf); return NULL; } @@ -530,9 +590,9 @@ sev_get_attestation_report(const char *mnonce, Error **errp) &input, &err); if (ret < 0) { if (err != SEV_RET_INVALID_LEN) { - error_setg(errp, "failed to query the attestation report length " - "ret=%d fw_err=%d (%s)", ret, err, fw_error_to_str(err)); - g_free(buf); + error_setg(errp, "SEV: Failed to query the attestation report" + " length ret=%d fw_err=%d (%s)", + ret, err, fw_error_to_str(err)); return NULL; } } @@ -545,9 +605,9 @@ sev_get_attestation_report(const char *mnonce, Error **errp) ret = sev_ioctl(sev->sev_fd, KVM_SEV_GET_ATTESTATION_REPORT, &input, &err); if (ret) { - error_setg_errno(errp, errno, "Failed to get attestation report" + error_setg_errno(errp, errno, "SEV: Failed to get attestation report" " ret=%d fw_err=%d (%s)", ret, err, fw_error_to_str(err)); - goto e_free_data; + return NULL; } report = g_new0(SevAttestationReport, 1); @@ -555,21 +615,24 @@ sev_get_attestation_report(const char *mnonce, Error **errp) trace_kvm_sev_attestation_report(mnonce, report->data); -e_free_data: - g_free(data); - g_free(buf); return report; } +SevAttestationReport *qmp_query_sev_attestation_report(const char *mnonce, + Error **errp) +{ + return sev_get_attestation_report(mnonce, errp); +} + static int sev_read_file_base64(const 
char *filename, guchar **data, gsize *len) { gsize sz; - gchar *base64; + g_autofree gchar *base64 = NULL; GError *error = NULL; if (!g_file_get_contents(filename, &base64, &sz, &error)) { - error_report("failed to read '%s' (%s)", filename, error->message); + error_report("SEV: Failed to read '%s' (%s)", filename, error->message); g_error_free(error); return -1; } @@ -584,31 +647,29 @@ sev_launch_start(SevGuestState *sev) gsize sz; int ret = 1; int fw_error, rc; - struct kvm_sev_launch_start *start; + struct kvm_sev_launch_start start = { + .handle = sev->handle, .policy = sev->policy + }; guchar *session = NULL, *dh_cert = NULL; - start = g_new0(struct kvm_sev_launch_start, 1); - - start->handle = sev->handle; - start->policy = sev->policy; if (sev->session_file) { if (sev_read_file_base64(sev->session_file, &session, &sz) < 0) { goto out; } - start->session_uaddr = (unsigned long)session; - start->session_len = sz; + start.session_uaddr = (unsigned long)session; + start.session_len = sz; } if (sev->dh_cert_file) { if (sev_read_file_base64(sev->dh_cert_file, &dh_cert, &sz) < 0) { goto out; } - start->dh_uaddr = (unsigned long)dh_cert; - start->dh_len = sz; + start.dh_uaddr = (unsigned long)dh_cert; + start.dh_len = sz; } - trace_kvm_sev_launch_start(start->policy, session, dh_cert); - rc = sev_ioctl(sev->sev_fd, KVM_SEV_LAUNCH_START, start, &fw_error); + trace_kvm_sev_launch_start(start.policy, session, dh_cert); + rc = sev_ioctl(sev->sev_fd, KVM_SEV_LAUNCH_START, &start, &fw_error); if (rc < 0) { error_report("%s: LAUNCH_START ret=%d fw_error=%d '%s'", __func__, ret, fw_error, fw_error_to_str(fw_error)); @@ -616,11 +677,10 @@ sev_launch_start(SevGuestState *sev) } sev_set_guest_state(sev, SEV_STATE_LAUNCH_UPDATE); - sev->handle = start->handle; + sev->handle = start.handle; ret = 0; out: - g_free(start); g_free(session); g_free(dh_cert); return ret; @@ -668,8 +728,8 @@ sev_launch_get_measure(Notifier *notifier, void *unused) { SevGuestState *sev = sev_guest; int ret, error; - guchar *data; - struct kvm_sev_launch_measure *measurement; + g_autofree guchar *data = NULL; + struct kvm_sev_launch_measure measurement = {}; if (!sev_check_state(sev, SEV_STATE_LAUNCH_UPDATE)) { return; @@ -683,43 +743,35 @@ sev_launch_get_measure(Notifier *notifier, void *unused) } } - measurement = g_new0(struct kvm_sev_launch_measure, 1); - /* query the measurement blob length */ ret = sev_ioctl(sev->sev_fd, KVM_SEV_LAUNCH_MEASURE, - measurement, &error); - if (!measurement->len) { + &measurement, &error); + if (!measurement.len) { error_report("%s: LAUNCH_MEASURE ret=%d fw_error=%d '%s'", __func__, ret, error, fw_error_to_str(errno)); - goto free_measurement; + return; } - data = g_new0(guchar, measurement->len); - measurement->uaddr = (unsigned long)data; + data = g_new0(guchar, measurement.len); + measurement.uaddr = (unsigned long)data; /* get the measurement blob */ ret = sev_ioctl(sev->sev_fd, KVM_SEV_LAUNCH_MEASURE, - measurement, &error); + &measurement, &error); if (ret) { error_report("%s: LAUNCH_MEASURE ret=%d fw_error=%d '%s'", __func__, ret, error, fw_error_to_str(errno)); - goto free_data; + return; } sev_set_guest_state(sev, SEV_STATE_LAUNCH_SECRET); /* encode the measurement value and emit the event */ - sev->measurement = g_base64_encode(data, measurement->len); + sev->measurement = g_base64_encode(data, measurement.len); trace_kvm_sev_launch_measurement(sev->measurement); - -free_data: - g_free(data); -free_measurement: - g_free(measurement); } -char * -sev_get_launch_measurement(void) 
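Several of the conversions above replace manual g_free() calls and goto-style cleanup labels with GLib's g_autofree attribute, which frees the pointer automatically when it goes out of scope and therefore keeps early-return error paths leak-free. A minimal sketch of the pattern; the function itself is illustrative and not taken from the patch:

    #include <glib.h>
    #include <string.h>

    /* Returns 0 on success; buf is freed automatically on every return path */
    static int check_magic(const char *text)
    {
        g_autofree gchar *buf = g_strdup(text);

        if (buf == NULL || strlen(buf) < 4) {
            return -1;              /* no explicit g_free() needed */
        }
        return strncmp(buf, "QEMU", 4) == 0 ? 0 : -1;
    }

    int main(void)
    {
        return check_magic("QEMU rocks");
    }
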
+static char *sev_get_launch_measurement(void) { if (sev_guest && sev_guest->state >= SEV_STATE_LAUNCH_SECRET) { @@ -729,6 +781,23 @@ sev_get_launch_measurement(void) return NULL; } +SevLaunchMeasureInfo *qmp_query_sev_launch_measure(Error **errp) +{ + char *data; + SevLaunchMeasureInfo *info; + + data = sev_get_launch_measurement(); + if (!data) { + error_setg(errp, "SEV launch measurement is not available"); + return NULL; + } + + info = g_malloc0(sizeof(*info)); + info->data = data; + + return info; +} + static Notifier sev_machine_done_notify = { .notify = sev_launch_get_measure, }; @@ -804,8 +873,6 @@ int sev_kvm_init(ConfidentialGuestSupport *cgs, Error **errp) goto err; } - sev->me_mask = ~(1UL << sev->cbitpos); - devname = object_property_get_str(OBJECT(sev), "sev-device", NULL); sev->sev_fd = open(devname, O_RDWR); if (sev->sev_fd < 0) { @@ -884,7 +951,7 @@ sev_encrypt_flash(uint8_t *ptr, uint64_t len, Error **errp) if (sev_check_state(sev_guest, SEV_STATE_LAUNCH_UPDATE)) { int ret = sev_launch_update_data(sev_guest, ptr, len); if (ret < 0) { - error_setg(errp, "failed to encrypt pflash rom"); + error_setg(errp, "SEV: Failed to encrypt pflash rom"); return ret; } } @@ -903,7 +970,7 @@ int sev_inject_launch_secret(const char *packet_hdr, const char *secret, MemoryRegion *mr = NULL; if (!sev_guest) { - error_setg(errp, "SEV: SEV not enabled."); + error_setg(errp, "SEV not enabled for guest"); return 1; } @@ -955,6 +1022,37 @@ int sev_inject_launch_secret(const char *packet_hdr, const char *secret, return 0; } +#define SEV_SECRET_GUID "4c2eb361-7d9b-4cc3-8081-127c90d3d294" +struct sev_secret_area { + uint32_t base; + uint32_t size; +}; + +void qmp_sev_inject_launch_secret(const char *packet_hdr, + const char *secret, + bool has_gpa, uint64_t gpa, + Error **errp) +{ + if (!sev_enabled()) { + error_setg(errp, "SEV not enabled for guest"); + return; + } + if (!has_gpa) { + uint8_t *data; + struct sev_secret_area *area; + + if (!pc_system_ovmf_table_find(SEV_SECRET_GUID, &data, NULL)) { + error_setg(errp, "SEV: no secret area found in OVMF," + " gpa must be specified."); + return; + } + area = (struct sev_secret_area *)data; + gpa = area->base; + } + + sev_inject_launch_secret(packet_hdr, secret, gpa, errp); +} + static int sev_es_parse_reset_block(SevInfoBlock *info, uint32_t *addr) { @@ -1071,6 +1169,116 @@ int sev_es_save_reset_vector(void *flash_ptr, uint64_t flash_size) return 0; } +static const QemuUUID sev_hash_table_header_guid = { + .data = UUID_LE(0x9438d606, 0x4f22, 0x4cc9, 0xb4, 0x79, 0xa7, 0x93, + 0xd4, 0x11, 0xfd, 0x21) +}; + +static const QemuUUID sev_kernel_entry_guid = { + .data = UUID_LE(0x4de79437, 0xabd2, 0x427f, 0xb8, 0x35, 0xd5, 0xb1, + 0x72, 0xd2, 0x04, 0x5b) +}; +static const QemuUUID sev_initrd_entry_guid = { + .data = UUID_LE(0x44baf731, 0x3a2f, 0x4bd7, 0x9a, 0xf1, 0x41, 0xe2, + 0x91, 0x69, 0x78, 0x1d) +}; +static const QemuUUID sev_cmdline_entry_guid = { + .data = UUID_LE(0x97d02dd8, 0xbd20, 0x4c94, 0xaa, 0x78, 0xe7, 0x71, + 0x4d, 0x36, 0xab, 0x2a) +}; + +/* + * Add the hashes of the linux kernel/initrd/cmdline to an encrypted guest page + * which is included in SEV's initial memory measurement. 
+ */ +bool sev_add_kernel_loader_hashes(SevKernelLoaderContext *ctx, Error **errp) +{ + uint8_t *data; + SevHashTableDescriptor *area; + SevHashTable *ht; + uint8_t cmdline_hash[HASH_SIZE]; + uint8_t initrd_hash[HASH_SIZE]; + uint8_t kernel_hash[HASH_SIZE]; + uint8_t *hashp; + size_t hash_len = HASH_SIZE; + int aligned_len; + + if (!pc_system_ovmf_table_find(SEV_HASH_TABLE_RV_GUID, &data, NULL)) { + error_setg(errp, "SEV: kernel specified but OVMF has no hash table guid"); + return false; + } + area = (SevHashTableDescriptor *)data; + + /* + * Calculate hash of kernel command-line with the terminating null byte. If + * the user doesn't supply a command-line via -append, the 1-byte "\0" will + * be used. + */ + hashp = cmdline_hash; + if (qcrypto_hash_bytes(QCRYPTO_HASH_ALG_SHA256, ctx->cmdline_data, + ctx->cmdline_size, &hashp, &hash_len, errp) < 0) { + return false; + } + assert(hash_len == HASH_SIZE); + + /* + * Calculate hash of initrd. If the user doesn't supply an initrd via + * -initrd, an empty buffer will be used (ctx->initrd_size == 0). + */ + hashp = initrd_hash; + if (qcrypto_hash_bytes(QCRYPTO_HASH_ALG_SHA256, ctx->initrd_data, + ctx->initrd_size, &hashp, &hash_len, errp) < 0) { + return false; + } + assert(hash_len == HASH_SIZE); + + /* Calculate hash of the kernel */ + hashp = kernel_hash; + struct iovec iov[2] = { + { .iov_base = ctx->setup_data, .iov_len = ctx->setup_size }, + { .iov_base = ctx->kernel_data, .iov_len = ctx->kernel_size } + }; + if (qcrypto_hash_bytesv(QCRYPTO_HASH_ALG_SHA256, iov, ARRAY_SIZE(iov), + &hashp, &hash_len, errp) < 0) { + return false; + } + assert(hash_len == HASH_SIZE); + + /* + * Populate the hashes table in the guest's memory at the OVMF-designated + * area for the SEV hashes table + */ + ht = qemu_map_ram_ptr(NULL, area->base); + + ht->guid = sev_hash_table_header_guid; + ht->len = sizeof(*ht); + + ht->cmdline.guid = sev_cmdline_entry_guid; + ht->cmdline.len = sizeof(ht->cmdline); + memcpy(ht->cmdline.hash, cmdline_hash, sizeof(ht->cmdline.hash)); + + ht->initrd.guid = sev_initrd_entry_guid; + ht->initrd.len = sizeof(ht->initrd); + memcpy(ht->initrd.hash, initrd_hash, sizeof(ht->initrd.hash)); + + ht->kernel.guid = sev_kernel_entry_guid; + ht->kernel.len = sizeof(ht->kernel); + memcpy(ht->kernel.hash, kernel_hash, sizeof(ht->kernel.hash)); + + /* When calling sev_encrypt_flash, the length has to be 16 byte aligned */ + aligned_len = ROUND_UP(ht->len, 16); + if (aligned_len != ht->len) { + /* zero the excess data so the measurement can be reliably calculated */ + memset(ht->padding, 0, aligned_len - ht->len); + } + + if (sev_encrypt_flash((uint8_t *)ht, aligned_len, errp) < 0) { + return false; + } + + return true; +} + static void sev_register_types(void) { diff --git a/target/i386/sev.h b/target/i386/sev.h new file mode 100644 index 0000000000..83e82aa42c --- /dev/null +++ b/target/i386/sev.h @@ -0,0 +1,62 @@ +/* + * QEMU Secure Encrypted Virutualization (SEV) support + * + * Copyright: Advanced Micro Devices, 2016-2018 + * + * Authors: + * Brijesh Singh + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
+ * + */ + +#ifndef QEMU_SEV_I386_H +#define QEMU_SEV_I386_H + +#ifndef CONFIG_USER_ONLY +#include CONFIG_DEVICES /* CONFIG_SEV */ +#endif + +#include "exec/confidential-guest-support.h" + +#define SEV_POLICY_NODBG 0x1 +#define SEV_POLICY_NOKS 0x2 +#define SEV_POLICY_ES 0x4 +#define SEV_POLICY_NOSEND 0x8 +#define SEV_POLICY_DOMAIN 0x10 +#define SEV_POLICY_SEV 0x20 + +typedef struct SevKernelLoaderContext { + char *setup_data; + size_t setup_size; + char *kernel_data; + size_t kernel_size; + char *initrd_data; + size_t initrd_size; + char *cmdline_data; + size_t cmdline_size; +} SevKernelLoaderContext; + +#ifdef CONFIG_SEV +bool sev_enabled(void); +bool sev_es_enabled(void); +#else +#define sev_enabled() 0 +#define sev_es_enabled() 0 +#endif + +extern uint32_t sev_get_cbit_position(void); +extern uint32_t sev_get_reduced_phys_bits(void); +extern bool sev_add_kernel_loader_hashes(SevKernelLoaderContext *ctx, Error **errp); + +int sev_encrypt_flash(uint8_t *ptr, uint64_t len, Error **errp); +int sev_inject_launch_secret(const char *hdr, const char *secret, + uint64_t gpa, Error **errp); + +int sev_es_save_reset_vector(void *flash_ptr, uint64_t flash_size); +void sev_es_set_reset_vector(CPUState *cpu); + +int sev_kvm_init(ConfidentialGuestSupport *cgs, Error **errp); + +#endif diff --git a/target/i386/sev_i386.h b/target/i386/sev_i386.h deleted file mode 100644 index ae6d840478..0000000000 --- a/target/i386/sev_i386.h +++ /dev/null @@ -1,41 +0,0 @@ -/* - * QEMU Secure Encrypted Virutualization (SEV) support - * - * Copyright: Advanced Micro Devices, 2016-2018 - * - * Authors: - * Brijesh Singh - * - * This work is licensed under the terms of the GNU GPL, version 2 or later. - * See the COPYING file in the top-level directory. - * - */ - -#ifndef QEMU_SEV_I386_H -#define QEMU_SEV_I386_H - -#include "qom/object.h" -#include "qapi/error.h" -#include "sysemu/kvm.h" -#include "sysemu/sev.h" -#include "qemu/error-report.h" -#include "qapi/qapi-types-misc-target.h" - -#define SEV_POLICY_NODBG 0x1 -#define SEV_POLICY_NOKS 0x2 -#define SEV_POLICY_ES 0x4 -#define SEV_POLICY_NOSEND 0x8 -#define SEV_POLICY_DOMAIN 0x10 -#define SEV_POLICY_SEV 0x20 - -extern bool sev_es_enabled(void); -extern uint64_t sev_get_me_mask(void); -extern SevInfo *sev_get_info(void); -extern uint32_t sev_get_cbit_position(void); -extern uint32_t sev_get_reduced_phys_bits(void); -extern char *sev_get_launch_measurement(void); -extern SevCapability *sev_get_capabilities(Error **errp); -extern SevAttestationReport * -sev_get_attestation_report(const char *mnonce, Error **errp); - -#endif diff --git a/target/i386/tcg/helper-tcg.h b/target/i386/tcg/helper-tcg.h index 60ca09e95e..0a4401e917 100644 --- a/target/i386/tcg/helper-tcg.h +++ b/target/i386/tcg/helper-tcg.h @@ -43,9 +43,15 @@ bool x86_cpu_exec_interrupt(CPUState *cpu, int int_req); #endif /* helper.c */ +#ifdef CONFIG_USER_ONLY +void x86_cpu_record_sigsegv(CPUState *cs, vaddr addr, + MMUAccessType access_type, + bool maperr, uintptr_t ra); +#else bool x86_cpu_tlb_fill(CPUState *cs, vaddr address, int size, MMUAccessType access_type, int mmu_idx, bool probe, uintptr_t retaddr); +#endif void breakpoint_handler(CPUState *cs); diff --git a/target/i386/tcg/mem_helper.c b/target/i386/tcg/mem_helper.c index 2da3cd14b6..a207e624cb 100644 --- a/target/i386/tcg/mem_helper.c +++ b/target/i386/tcg/mem_helper.c @@ -67,7 +67,7 @@ void helper_cmpxchg8b(CPUX86State *env, target_ulong a0) { uintptr_t ra = GETPC(); int mem_idx = cpu_mmu_index(env, false); - TCGMemOpIdx oi = 
make_memop_idx(MO_TEQ, mem_idx); + MemOpIdx oi = make_memop_idx(MO_TEQ, mem_idx); oldv = cpu_atomic_cmpxchgq_le_mmu(env, a0, cmpv, newv, oi, ra); } @@ -136,7 +136,7 @@ void helper_cmpxchg16b(CPUX86State *env, target_ulong a0) Int128 newv = int128_make128(env->regs[R_EBX], env->regs[R_ECX]); int mem_idx = cpu_mmu_index(env, false); - TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx); + MemOpIdx oi = make_memop_idx(MO_TE | MO_128 | MO_ALIGN, mem_idx); Int128 oldv = cpu_atomic_cmpxchgo_le_mmu(env, a0, cmpv, newv, oi, ra); if (int128_eq(oldv, cmpv)) { diff --git a/target/i386/tcg/misc_helper.c b/target/i386/tcg/misc_helper.c index baffa5d7ba..5769db5ace 100644 --- a/target/i386/tcg/misc_helper.c +++ b/target/i386/tcg/misc_helper.c @@ -110,14 +110,6 @@ void QEMU_NORETURN helper_pause(CPUX86State *env, int next_eip_addend) do_pause(env); } -void QEMU_NORETURN helper_debug(CPUX86State *env) -{ - CPUState *cs = env_cpu(env); - - cs->exception_index = EXCP_DEBUG; - cpu_loop_exit(cs); -} - uint64_t helper_rdpkru(CPUX86State *env, uint32_t ecx) { if ((env->cr[4] & CR4_PKE_MASK) == 0) { diff --git a/target/i386/tcg/sysemu/excp_helper.c b/target/i386/tcg/sysemu/excp_helper.c index 7af887be4d..5ba739fbed 100644 --- a/target/i386/tcg/sysemu/excp_helper.c +++ b/target/i386/tcg/sysemu/excp_helper.c @@ -90,19 +90,10 @@ static int mmu_translate(CPUState *cs, hwaddr addr, MMUTranslateFunc get_hphys_f target_ulong pdpe_addr; #ifdef TARGET_X86_64 - if (env->hflags & HF_LMA_MASK) { + if (pg_mode & PG_MODE_LMA) { bool la57 = pg_mode & PG_MODE_LA57; uint64_t pml5e_addr, pml5e; uint64_t pml4e_addr, pml4e; - int32_t sext; - - /* test virtual address sign extension */ - sext = la57 ? (int64_t)addr >> 56 : (int64_t)addr >> 47; - if (get_hphys_func && sext != 0 && sext != -1) { - env->error_code = 0; - cs->exception_index = EXCP0D_GPF; - return 1; - } if (la57) { pml5e_addr = ((cr3 & ~0xfff) + @@ -287,7 +278,7 @@ do_check_protect_pse36: *prot |= PAGE_EXEC; } - if (!(env->hflags & HF_LMA_MASK)) { + if (!(pg_mode & PG_MODE_LMA)) { pkr = 0; } else if (ptep & PG_USER_MASK) { pkr = pg_mode & PG_MODE_PKE ? env->pkru : 0; @@ -423,6 +414,18 @@ static int handle_mmu_fault(CPUState *cs, vaddr addr, int size, page_size = 4096; } else { pg_mode = get_pg_mode(env); + if (pg_mode & PG_MODE_LMA) { + int32_t sext; + + /* test virtual address sign extension */ + sext = (int64_t)addr >> (pg_mode & PG_MODE_LA57 ? 
56 : 47); + if (sext != 0 && sext != -1) { + env->error_code = 0; + cs->exception_index = EXCP0D_GPF; + return 1; + } + } + error_code = mmu_translate(cs, addr, get_hphys, env->cr[3], is_write1, mmu_idx, pg_mode, &paddr, &page_size, &prot); diff --git a/target/i386/tcg/tcg-cpu.c b/target/i386/tcg/tcg-cpu.c index 3ecfae34cb..6fdfdf9598 100644 --- a/target/i386/tcg/tcg-cpu.c +++ b/target/i386/tcg/tcg-cpu.c @@ -72,10 +72,11 @@ static const struct TCGCPUOps x86_tcg_ops = { .synchronize_from_tb = x86_cpu_synchronize_from_tb, .cpu_exec_enter = x86_cpu_exec_enter, .cpu_exec_exit = x86_cpu_exec_exit, - .tlb_fill = x86_cpu_tlb_fill, #ifdef CONFIG_USER_ONLY .fake_user_interrupt = x86_cpu_do_interrupt, + .record_sigsegv = x86_cpu_record_sigsegv, #else + .tlb_fill = x86_cpu_tlb_fill, .do_interrupt = x86_cpu_do_interrupt, .cpu_exec_interrupt = x86_cpu_exec_interrupt, .debug_excp_handler = breakpoint_handler, diff --git a/target/i386/tcg/translate.c b/target/i386/tcg/translate.c index fe26b2db0c..a95a13a94e 100644 --- a/target/i386/tcg/translate.c +++ b/target/i386/tcg/translate.c @@ -2680,9 +2680,7 @@ do_gen_eob_worker(DisasContext *s, bool inhibit, bool recheck_tf, bool jr) if (s->base.tb->flags & HF_RF_MASK) { gen_helper_reset_rf(cpu_env); } - if (s->base.singlestep_enabled) { - gen_helper_debug(cpu_env); - } else if (recheck_tf) { + if (recheck_tf) { gen_helper_rechecking_single_step(cpu_env); tcg_gen_exit_tb(NULL, 0); } else if (s->flags & HF_TF_MASK) { @@ -8576,6 +8574,7 @@ static void i386_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu) DisasContext *dc = container_of(dcbase, DisasContext, base); CPUX86State *env = cpu->env_ptr; uint32_t flags = dc->base.tb->flags; + uint32_t cflags = tb_cflags(dc->base.tb); int cpl = (flags >> HF_CPL_SHIFT) & 3; int iopl = (flags >> IOPL_SHIFT) & 3; @@ -8613,14 +8612,14 @@ static void i386_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu) dc->cpuid_ext3_features = env->features[FEAT_8000_0001_ECX]; dc->cpuid_7_0_ebx_features = env->features[FEAT_7_0_EBX]; dc->cpuid_xsave_features = env->features[FEAT_XSAVE]; - dc->jmp_opt = !(dc->base.singlestep_enabled || + dc->jmp_opt = !((cflags & CF_NO_GOTO_TB) || (flags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK))); /* * If jmp_opt, we want to handle each string instruction individually. * For icount also disable repz optimization so that each iteration * is accounted separately. */ - dc->repz_opt = !dc->jmp_opt && !(tb_cflags(dc->base.tb) & CF_USE_ICOUNT); + dc->repz_opt = !dc->jmp_opt && !(cflags & CF_USE_ICOUNT); dc->T0 = tcg_temp_new(); dc->T1 = tcg_temp_new(); diff --git a/target/i386/tcg/user/excp_helper.c b/target/i386/tcg/user/excp_helper.c index a89b5228fd..cd507e2a1b 100644 --- a/target/i386/tcg/user/excp_helper.c +++ b/target/i386/tcg/user/excp_helper.c @@ -22,18 +22,29 @@ #include "exec/exec-all.h" #include "tcg/helper-tcg.h" -bool x86_cpu_tlb_fill(CPUState *cs, vaddr addr, int size, - MMUAccessType access_type, int mmu_idx, - bool probe, uintptr_t retaddr) +void x86_cpu_record_sigsegv(CPUState *cs, vaddr addr, + MMUAccessType access_type, + bool maperr, uintptr_t ra) { X86CPU *cpu = X86_CPU(cs); CPUX86State *env = &cpu->env; + /* + * The error_code that hw reports as part of the exception frame + * is copied to linux sigcontext.err. The exception_index is + * copied to linux sigcontext.trapno. Short of inventing a new + * place to store the trapno, we cannot let our caller raise the + * signal and set exception_index to EXCP_INTERRUPT. 
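/*
 * Illustration only, not part of the patch: what the sigcontext.err /
 * sigcontext.trapno wiring described above means in practice for a
 * linux-user guest.  This probe assumes an x86-64 glibc that exposes
 * REG_ERR and REG_TRAPNO when _GNU_SOURCE is defined; a write to an
 * unmapped page should report err == 0x6 (user + write, page not present)
 * and trapno == 14 (#PF).
 */
#define _GNU_SOURCE
#include <signal.h>
#include <stdio.h>
#include <ucontext.h>
#include <unistd.h>

static void handler(int sig, siginfo_t *si, void *uc_)
{
    ucontext_t *uc = uc_;

    /* demo only: printf is not async-signal-safe */
    printf("fault addr %p err 0x%llx trapno %lld\n", si->si_addr,
           (unsigned long long)uc->uc_mcontext.gregs[REG_ERR],
           (long long)uc->uc_mcontext.gregs[REG_TRAPNO]);
    _exit(0);
}

int main(void)
{
    struct sigaction sa = { .sa_sigaction = handler, .sa_flags = SA_SIGINFO };

    sigaction(SIGSEGV, &sa, NULL);
    *(volatile int *)0 = 1;     /* fault: expect err 0x6, trapno 14 */
    return 1;
}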
+ */ env->cr[2] = addr; - env->error_code = (access_type == MMU_DATA_STORE) << PG_ERROR_W_BIT; - env->error_code |= PG_ERROR_U_MASK; + env->error_code = ((access_type == MMU_DATA_STORE) << PG_ERROR_W_BIT) + | (maperr ? 0 : PG_ERROR_P_MASK) + | PG_ERROR_U_MASK; cs->exception_index = EXCP0E_PAGE; + + /* Disable do_interrupt_user. */ env->exception_is_int = 0; env->exception_next_eip = -1; - cpu_loop_exit_restore(cs, retaddr); + + cpu_loop_exit_restore(cs, ra); } diff --git a/target/i386/whpx/whpx-all.c b/target/i386/whpx/whpx-all.c index 3e925b9da7..ef896da0a2 100644 --- a/target/i386/whpx/whpx-all.c +++ b/target/i386/whpx/whpx-all.c @@ -1598,6 +1598,7 @@ static void whpx_log_sync(MemoryListener *listener, } static MemoryListener whpx_memory_listener = { + .name = "whpx", .begin = whpx_transaction_begin, .commit = whpx_transaction_commit, .region_add = whpx_region_add, diff --git a/target/m68k/cpu.c b/target/m68k/cpu.c index 66d22d1189..c7aeb7da9c 100644 --- a/target/m68k/cpu.c +++ b/target/m68k/cpu.c @@ -515,9 +515,9 @@ static const struct SysemuCPUOps m68k_sysemu_ops = { static const struct TCGCPUOps m68k_tcg_ops = { .initialize = m68k_tcg_init, - .tlb_fill = m68k_cpu_tlb_fill, #ifndef CONFIG_USER_ONLY + .tlb_fill = m68k_cpu_tlb_fill, .cpu_exec_interrupt = m68k_cpu_exec_interrupt, .do_interrupt = m68k_cpu_do_interrupt, .do_transaction_failed = m68k_cpu_transaction_failed, diff --git a/target/m68k/helper.c b/target/m68k/helper.c index 137a3e1a3d..5728e48585 100644 --- a/target/m68k/helper.c +++ b/target/m68k/helper.c @@ -978,16 +978,12 @@ void m68k_set_irq_level(M68kCPU *cpu, int level, uint8_t vector) } } -#endif - bool m68k_cpu_tlb_fill(CPUState *cs, vaddr address, int size, MMUAccessType qemu_access_type, int mmu_idx, bool probe, uintptr_t retaddr) { M68kCPU *cpu = M68K_CPU(cs); CPUM68KState *env = &cpu->env; - -#ifndef CONFIG_USER_ONLY hwaddr physical; int prot; int access_type; @@ -1051,12 +1047,12 @@ bool m68k_cpu_tlb_fill(CPUState *cs, vaddr address, int size, if (!(access_type & ACCESS_STORE)) { env->mmu.ssw |= M68K_RW_040; } -#endif cs->exception_index = EXCP_ACCESS; env->mmu.ar = address; cpu_loop_exit_restore(cs, retaddr); } +#endif /* !CONFIG_USER_ONLY */ uint32_t HELPER(bitrev)(uint32_t x) { diff --git a/target/m68k/op_helper.c b/target/m68k/op_helper.c index 5d624838ae..cfbc987ba6 100644 --- a/target/m68k/op_helper.c +++ b/target/m68k/op_helper.c @@ -22,7 +22,6 @@ #include "exec/exec-all.h" #include "exec/cpu_ldst.h" #include "semihosting/semihost.h" -#include "tcg/tcg.h" #if !defined(CONFIG_USER_ONLY) @@ -775,7 +774,7 @@ static void do_cas2l(CPUM68KState *env, uint32_t regs, uint32_t a1, uint32_t a2, uintptr_t ra = GETPC(); #if defined(CONFIG_ATOMIC64) int mmu_idx = cpu_mmu_index(env, 0); - TCGMemOpIdx oi = make_memop_idx(MO_BEQ, mmu_idx); + MemOpIdx oi = make_memop_idx(MO_BEQ, mmu_idx); #endif if (parallel) { diff --git a/target/m68k/translate.c b/target/m68k/translate.c index 50a55f949c..af43c8eab8 100644 --- a/target/m68k/translate.c +++ b/target/m68k/translate.c @@ -194,18 +194,6 @@ static void do_writebacks(DisasContext *s) } } -static bool is_singlestepping(DisasContext *s) -{ - /* - * Return true if we are singlestepping either because of - * architectural singlestep or QEMU gdbstub singlestep. This does - * not include the command line '-singlestep' mode which is rather - * misnamed as it only means "one instruction per TB" and doesn't - * affect the code we generate. 
- */ - return s->base.singlestep_enabled || s->ss_active; -} - /* is_jmp field values */ #define DISAS_JUMP DISAS_TARGET_0 /* only pc was modified dynamically */ #define DISAS_EXIT DISAS_TARGET_1 /* cpu state was modified dynamically */ @@ -320,20 +308,6 @@ static void gen_exception(DisasContext *s, uint32_t dest, int nr) s->base.is_jmp = DISAS_NORETURN; } -static void gen_singlestep_exception(DisasContext *s) -{ - /* - * Generate the right kind of exception for singlestep, which is - * either the architectural singlestep or EXCP_DEBUG for QEMU's - * gdb singlestepping. - */ - if (s->ss_active) { - gen_raise_exception(EXCP_TRACE); - } else { - gen_raise_exception(EXCP_DEBUG); - } -} - static inline void gen_addr_fault(DisasContext *s) { gen_exception(s, s->base.pc_next, EXCP_ADDRESS); @@ -1522,10 +1496,10 @@ static void gen_exit_tb(DisasContext *s) /* Generate a jump to an immediate address. */ static void gen_jmp_tb(DisasContext *s, int n, uint32_t dest) { - if (unlikely(is_singlestepping(s))) { + if (unlikely(s->ss_active)) { update_cc_op(s); tcg_gen_movi_i32(QREG_PC, dest); - gen_singlestep_exception(s); + gen_raise_exception(EXCP_TRACE); } else if (translator_use_goto_tb(&s->base, dest)) { tcg_gen_goto_tb(n); tcg_gen_movi_i32(QREG_PC, dest); @@ -6193,7 +6167,7 @@ static void m68k_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu) dc->ss_active = (M68K_SR_TRACE(env->sr) == M68K_SR_TRACE_ANY_INS); /* If architectural single step active, limit to 1 */ - if (is_singlestepping(dc)) { + if (dc->ss_active) { dc->base.max_insns = 1; } } @@ -6252,17 +6226,17 @@ static void m68k_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu) break; case DISAS_TOO_MANY: update_cc_op(dc); - if (is_singlestepping(dc)) { + if (dc->ss_active) { tcg_gen_movi_i32(QREG_PC, dc->pc); - gen_singlestep_exception(dc); + gen_raise_exception(EXCP_TRACE); } else { gen_jmp_tb(dc, 0, dc->pc); } break; case DISAS_JUMP: /* We updated CC_OP and PC in gen_jmp/gen_jmp_im. */ - if (is_singlestepping(dc)) { - gen_singlestep_exception(dc); + if (dc->ss_active) { + gen_raise_exception(EXCP_TRACE); } else { tcg_gen_lookup_and_goto_ptr(); } @@ -6272,8 +6246,8 @@ static void m68k_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu) * We updated CC_OP and PC in gen_exit_tb, but also modified * other state that may require returning to the main loop. */ - if (is_singlestepping(dc)) { - gen_singlestep_exception(dc); + if (dc->ss_active) { + gen_raise_exception(EXCP_TRACE); } else { tcg_gen_exit_tb(NULL, 0); } diff --git a/target/microblaze/cpu.c b/target/microblaze/cpu.c index 15db277925..b9c888b87e 100644 --- a/target/microblaze/cpu.c +++ b/target/microblaze/cpu.c @@ -365,9 +365,9 @@ static const struct SysemuCPUOps mb_sysemu_ops = { static const struct TCGCPUOps mb_tcg_ops = { .initialize = mb_tcg_init, .synchronize_from_tb = mb_cpu_synchronize_from_tb, - .tlb_fill = mb_cpu_tlb_fill, #ifndef CONFIG_USER_ONLY + .tlb_fill = mb_cpu_tlb_fill, .cpu_exec_interrupt = mb_cpu_exec_interrupt, .do_interrupt = mb_cpu_do_interrupt, .do_transaction_failed = mb_cpu_transaction_failed, diff --git a/target/microblaze/cpu.h b/target/microblaze/cpu.h index b7a848bbae..e9cd0b88de 100644 --- a/target/microblaze/cpu.h +++ b/target/microblaze/cpu.h @@ -394,10 +394,6 @@ void mb_tcg_init(void); #define MMU_USER_IDX 2 /* See NB_MMU_MODES further up the file. 
*/ -bool mb_cpu_tlb_fill(CPUState *cs, vaddr address, int size, - MMUAccessType access_type, int mmu_idx, - bool probe, uintptr_t retaddr); - typedef CPUMBState CPUArchState; typedef MicroBlazeCPU ArchCPU; @@ -415,6 +411,10 @@ static inline void cpu_get_tb_cpu_state(CPUMBState *env, target_ulong *pc, } #if !defined(CONFIG_USER_ONLY) +bool mb_cpu_tlb_fill(CPUState *cs, vaddr address, int size, + MMUAccessType access_type, int mmu_idx, + bool probe, uintptr_t retaddr); + void mb_cpu_transaction_failed(CPUState *cs, hwaddr physaddr, vaddr addr, unsigned size, MMUAccessType access_type, int mmu_idx, MemTxAttrs attrs, diff --git a/target/microblaze/helper.c b/target/microblaze/helper.c index dd2aecd1d5..a607fe68e5 100644 --- a/target/microblaze/helper.c +++ b/target/microblaze/helper.c @@ -24,18 +24,7 @@ #include "qemu/host-utils.h" #include "exec/log.h" -#if defined(CONFIG_USER_ONLY) - -bool mb_cpu_tlb_fill(CPUState *cs, vaddr address, int size, - MMUAccessType access_type, int mmu_idx, - bool probe, uintptr_t retaddr) -{ - cs->exception_index = 0xaa; - cpu_loop_exit_restore(cs, retaddr); -} - -#else /* !CONFIG_USER_ONLY */ - +#ifndef CONFIG_USER_ONLY static bool mb_cpu_access_is_secure(MicroBlazeCPU *cpu, MMUAccessType access_type) { diff --git a/target/microblaze/translate.c b/target/microblaze/translate.c index a14ffed784..2561b904b9 100644 --- a/target/microblaze/translate.c +++ b/target/microblaze/translate.c @@ -126,12 +126,7 @@ static void gen_raise_hw_excp(DisasContext *dc, uint32_t esr_ec) static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest) { - if (dc->base.singlestep_enabled) { - TCGv_i32 tmp = tcg_const_i32(EXCP_DEBUG); - tcg_gen_movi_i32(cpu_pc, dest); - gen_helper_raise_exception(cpu_env, tmp); - tcg_temp_free_i32(tmp); - } else if (translator_use_goto_tb(&dc->base, dest)) { + if (translator_use_goto_tb(&dc->base, dest)) { tcg_gen_goto_tb(n); tcg_gen_movi_i32(cpu_pc, dest); tcg_gen_exit_tb(dc->base.tb, n); @@ -727,6 +722,7 @@ static TCGv compute_ldst_addr_ea(DisasContext *dc, int ra, int rb) } #endif +#ifndef CONFIG_USER_ONLY static void record_unaligned_ess(DisasContext *dc, int rd, MemOp size, bool store) { @@ -739,6 +735,7 @@ static void record_unaligned_ess(DisasContext *dc, int rd, tcg_set_insn_start_param(dc->insn_start, 1, iflags); } +#endif static bool do_load(DisasContext *dc, int rd, TCGv addr, MemOp mop, int mem_index, bool rev) @@ -760,12 +757,19 @@ static bool do_load(DisasContext *dc, int rd, TCGv addr, MemOp mop, } } + /* + * For system mode, enforce alignment if the cpu configuration + * requires it. For user-mode, the Linux kernel will have fixed up + * any unaligned access, so emulate that by *not* setting MO_ALIGN. + */ +#ifndef CONFIG_USER_ONLY if (size > MO_8 && (dc->tb_flags & MSR_EE) && dc->cfg->unaligned_exceptions) { record_unaligned_ess(dc, rd, size, false); mop |= MO_ALIGN; } +#endif tcg_gen_qemu_ld_i32(reg_for_write(dc, rd), addr, mem_index, mop); @@ -906,12 +910,19 @@ static bool do_store(DisasContext *dc, int rd, TCGv addr, MemOp mop, } } + /* + * For system mode, enforce alignment if the cpu configuration + * requires it. For user-mode, the Linux kernel will have fixed up + * any unaligned access, so emulate that by *not* setting MO_ALIGN. 
+ */ +#ifndef CONFIG_USER_ONLY if (size > MO_8 && (dc->tb_flags & MSR_EE) && dc->cfg->unaligned_exceptions) { record_unaligned_ess(dc, rd, size, true); mop |= MO_ALIGN; } +#endif tcg_gen_qemu_st_i32(reg_for_read(dc, rd), addr, mem_index, mop); @@ -1779,7 +1790,7 @@ static void mb_tr_tb_stop(DisasContextBase *dcb, CPUState *cs) break; case DISAS_JUMP: - if (dc->jmp_dest != -1 && !cs->singlestep_enabled) { + if (dc->jmp_dest != -1 && !(tb_cflags(dc->base.tb) & CF_NO_GOTO_TB)) { /* Direct jump. */ tcg_gen_discard_i32(cpu_btarget); @@ -1804,15 +1815,10 @@ static void mb_tr_tb_stop(DisasContextBase *dcb, CPUState *cs) return; } - /* Indirect jump (or direct jump w/ singlestep) */ + /* Indirect jump (or direct jump w/ goto_tb disabled) */ tcg_gen_mov_i32(cpu_pc, cpu_btarget); tcg_gen_discard_i32(cpu_btarget); - - if (unlikely(cs->singlestep_enabled)) { - gen_raise_exception(dc, EXCP_DEBUG); - } else { - tcg_gen_lookup_and_goto_ptr(); - } + tcg_gen_lookup_and_goto_ptr(); return; default: diff --git a/target/mips/cpu-defs.c.inc b/target/mips/cpu-defs.c.inc index cbc45fcb0e..582f940070 100644 --- a/target/mips/cpu-defs.c.inc +++ b/target/mips/cpu-defs.c.inc @@ -369,7 +369,6 @@ const mips_def_t mips_defs[] = * Config3: VZ, CTXTC, CDMM, TL * Config4: MMUExtDef * Config5: MRP - * FIR(FCR0): Has2008 * */ .name = "P5600", .CP0_PRid = 0x0001A800, @@ -886,6 +885,7 @@ const mips_def_t mips_defs[] = (0x1 << FCR0_D) | (0x1 << FCR0_S), .CP1_fcr31 = 0, .CP1_fcr31_rw_bitmask = 0xFF83FFFF, + .MSAIR = (0x01 << MSAIR_ProcID) | (0x40 << MSAIR_Rev), .SEGBITS = 48, .PABITS = 48, .insn_flags = CPU_MIPS64R2 | INSN_LOONGSON3A | diff --git a/target/mips/cpu.c b/target/mips/cpu.c index 00e0c55d0e..4aae23934b 100644 --- a/target/mips/cpu.c +++ b/target/mips/cpu.c @@ -539,9 +539,9 @@ static const struct SysemuCPUOps mips_sysemu_ops = { static const struct TCGCPUOps mips_tcg_ops = { .initialize = mips_tcg_init, .synchronize_from_tb = mips_cpu_synchronize_from_tb, - .tlb_fill = mips_cpu_tlb_fill, #if !defined(CONFIG_USER_ONLY) + .tlb_fill = mips_cpu_tlb_fill, .cpu_exec_interrupt = mips_cpu_exec_interrupt, .do_interrupt = mips_cpu_do_interrupt, .do_transaction_failed = mips_cpu_do_transaction_failed, diff --git a/target/mips/tcg/meson.build b/target/mips/tcg/meson.build index 8f6f7508b6..98003779ae 100644 --- a/target/mips/tcg/meson.build +++ b/target/mips/tcg/meson.build @@ -28,9 +28,6 @@ mips_ss.add(when: 'TARGET_MIPS64', if_true: files( 'mxu_translate.c', )) -if have_user - subdir('user') -endif if have_system subdir('sysemu') endif diff --git a/target/mips/tcg/msa.decode b/target/mips/tcg/msa.decode index 74d99f6862..9575289195 100644 --- a/target/mips/tcg/msa.decode +++ b/target/mips/tcg/msa.decode @@ -13,19 +13,246 @@ &r rs rt rd sa -&msa_bz df wt s16 +&msa_r df wd ws wt +&msa_bz df wt sa +&msa_ldi df wd sa +&msa_i df wd ws sa +&msa_bit df wd ws m +&msa_elm_df df wd ws n +&msa_elm wd ws + +%elm_df 16:6 !function=elm_df +%elm_n 16:6 !function=elm_n +%bit_df 16:7 !function=bit_df +%bit_m 16:7 !function=bit_m +%2r_df_w 16:1 !function=plus_2 +%3r_df_h 21:1 !function=plus_1 +%3r_df_w 21:1 !function=plus_2 @lsa ...... rs:5 rt:5 rd:5 ... sa:2 ...... &r -@bz ...... ... .. wt:5 s16:16 &msa_bz df=3 -@bz_df ...... ... df:2 wt:5 s16:16 &msa_bz +@ldst ...... sa:s10 ws:5 wd:5 .... df:2 &msa_i +@bz_v ...... ... .. wt:5 sa:16 &msa_bz df=3 +@bz ...... ... df:2 wt:5 sa:16 &msa_bz +@elm_df ...... .... ...... ws:5 wd:5 ...... &msa_elm_df df=%elm_df n=%elm_n +@elm ...... .......... ws:5 wd:5 ...... &msa_elm +@vec ...... ..... 
wt:5 ws:5 wd:5 ...... &msa_r df=0 +@2r ...... ........ df:2 ws:5 wd:5 ...... &msa_r wt=0 +@2rf ...... ......... . ws:5 wd:5 ...... &msa_r wt=0 df=%2r_df_w +@3r ...... ... df:2 wt:5 ws:5 wd:5 ...... &msa_r +@3rf_h ...... .... . wt:5 ws:5 wd:5 ...... &msa_r df=%3r_df_h +@3rf_w ...... .... . wt:5 ws:5 wd:5 ...... &msa_r df=%3r_df_w +@u5 ...... ... df:2 sa:5 ws:5 wd:5 ...... &msa_i +@s5 ...... ... df:2 sa:s5 ws:5 wd:5 ...... &msa_i +@i8_df ...... df:2 sa:s8 ws:5 wd:5 ...... &msa_i +@i8 ...... .. sa:s8 ws:5 wd:5 ...... &msa_i df=0 +@ldi ...... ... df:2 sa:s10 wd:5 ...... &msa_ldi +@bit ...... ... ....... ws:5 wd:5 ...... &msa_bit df=%bit_df m=%bit_m LSA 000000 ..... ..... ..... 000 .. 000101 @lsa DLSA 000000 ..... ..... ..... 000 .. 010101 @lsa -BZ_V 010001 01011 ..... ................ @bz -BNZ_V 010001 01111 ..... ................ @bz +BZ_V 010001 01011 ..... ................ @bz_v +BNZ_V 010001 01111 ..... ................ @bz_v +BZ 010001 110 .. ..... ................ @bz +BNZ 010001 111 .. ..... ................ @bz -BZ_x 010001 110 .. ..... ................ @bz_df -BNZ_x 010001 111 .. ..... ................ @bz_df +ANDI 011110 00 ........ ..... ..... 000000 @i8 +ORI 011110 01 ........ ..... ..... 000000 @i8 +NORI 011110 10 ........ ..... ..... 000000 @i8 +XORI 011110 11 ........ ..... ..... 000000 @i8 +BMNZI 011110 00 ........ ..... ..... 000001 @i8 +BMZI 011110 01 ........ ..... ..... 000001 @i8 +BSELI 011110 10 ........ ..... ..... 000001 @i8 +SHF 011110 .. ........ ..... ..... 000010 @i8_df -MSA 011110 -------------------------- +ADDVI 011110 000 .. ..... ..... ..... 000110 @u5 +SUBVI 011110 001 .. ..... ..... ..... 000110 @u5 +MAXI_S 011110 010 .. ..... ..... ..... 000110 @s5 +MAXI_U 011110 011 .. ..... ..... ..... 000110 @u5 +MINI_S 011110 100 .. ..... ..... ..... 000110 @s5 +MINI_U 011110 101 .. ..... ..... ..... 000110 @u5 + +CEQI 011110 000 .. ..... ..... ..... 000111 @s5 +CLTI_S 011110 010 .. ..... ..... ..... 000111 @s5 +CLTI_U 011110 011 .. ..... ..... ..... 000111 @u5 +CLEI_S 011110 100 .. ..... ..... ..... 000111 @s5 +CLEI_U 011110 101 .. ..... ..... ..... 000111 @u5 + +LDI 011110 110 .. .......... ..... 000111 @ldi + +SLLI 011110 000 ....... ..... ..... 001001 @bit +SRAI 011110 001 ....... ..... ..... 001001 @bit +SRLI 011110 010 ....... ..... ..... 001001 @bit +BCLRI 011110 011 ....... ..... ..... 001001 @bit +BSETI 011110 100 ....... ..... ..... 001001 @bit +BNEGI 011110 101 ....... ..... ..... 001001 @bit +BINSLI 011110 110 ....... ..... ..... 001001 @bit +BINSRI 011110 111 ....... ..... ..... 001001 @bit + +SAT_S 011110 000 ....... ..... ..... 001010 @bit +SAT_U 011110 001 ....... ..... ..... 001010 @bit +SRARI 011110 010 ....... ..... ..... 001010 @bit +SRLRI 011110 011 ....... ..... ..... 001010 @bit + +SLL 011110 000.. ..... ..... ..... 001101 @3r +SRA 011110 001.. ..... ..... ..... 001101 @3r +SRL 011110 010.. ..... ..... ..... 001101 @3r +BCLR 011110 011.. ..... ..... ..... 001101 @3r +BSET 011110 100.. ..... ..... ..... 001101 @3r +BNEG 011110 101.. ..... ..... ..... 001101 @3r +BINSL 011110 110.. ..... ..... ..... 001101 @3r +BINSR 011110 111.. ..... ..... ..... 001101 @3r + +ADDV 011110 000.. ..... ..... ..... 001110 @3r +SUBV 011110 001.. ..... ..... ..... 001110 @3r +MAX_S 011110 010.. ..... ..... ..... 001110 @3r +MAX_U 011110 011.. ..... ..... ..... 001110 @3r +MIN_S 011110 100.. ..... ..... ..... 001110 @3r +MIN_U 011110 101.. ..... ..... ..... 001110 @3r +MAX_A 011110 110.. ..... ..... ..... 001110 @3r +MIN_A 011110 111.. ..... ..... ..... 
001110 @3r + +CEQ 011110 000.. ..... ..... ..... 001111 @3r +CLT_S 011110 010.. ..... ..... ..... 001111 @3r +CLT_U 011110 011.. ..... ..... ..... 001111 @3r +CLE_S 011110 100.. ..... ..... ..... 001111 @3r +CLE_U 011110 101.. ..... ..... ..... 001111 @3r + +ADD_A 011110 000.. ..... ..... ..... 010000 @3r +ADDS_A 011110 001.. ..... ..... ..... 010000 @3r +ADDS_S 011110 010.. ..... ..... ..... 010000 @3r +ADDS_U 011110 011.. ..... ..... ..... 010000 @3r +AVE_S 011110 100.. ..... ..... ..... 010000 @3r +AVE_U 011110 101.. ..... ..... ..... 010000 @3r +AVER_S 011110 110.. ..... ..... ..... 010000 @3r +AVER_U 011110 111.. ..... ..... ..... 010000 @3r + +SUBS_S 011110 000.. ..... ..... ..... 010001 @3r +SUBS_U 011110 001.. ..... ..... ..... 010001 @3r +SUBSUS_U 011110 010.. ..... ..... ..... 010001 @3r +SUBSUU_S 011110 011.. ..... ..... ..... 010001 @3r +ASUB_S 011110 100.. ..... ..... ..... 010001 @3r +ASUB_U 011110 101.. ..... ..... ..... 010001 @3r + +MULV 011110 000.. ..... ..... ..... 010010 @3r +MADDV 011110 001.. ..... ..... ..... 010010 @3r +MSUBV 011110 010.. ..... ..... ..... 010010 @3r +DIV_S 011110 100.. ..... ..... ..... 010010 @3r +DIV_U 011110 101.. ..... ..... ..... 010010 @3r +MOD_S 011110 110.. ..... ..... ..... 010010 @3r +MOD_U 011110 111.. ..... ..... ..... 010010 @3r + +DOTP_S 011110 000.. ..... ..... ..... 010011 @3r +DOTP_U 011110 001.. ..... ..... ..... 010011 @3r +DPADD_S 011110 010.. ..... ..... ..... 010011 @3r +DPADD_U 011110 011.. ..... ..... ..... 010011 @3r +DPSUB_S 011110 100.. ..... ..... ..... 010011 @3r +DPSUB_U 011110 101.. ..... ..... ..... 010011 @3r + +SLD 011110 000 .. ..... ..... ..... 010100 @3r +SPLAT 011110 001 .. ..... ..... ..... 010100 @3r +PCKEV 011110 010 .. ..... ..... ..... 010100 @3r +PCKOD 011110 011 .. ..... ..... ..... 010100 @3r +ILVL 011110 100 .. ..... ..... ..... 010100 @3r +ILVR 011110 101 .. ..... ..... ..... 010100 @3r +ILVEV 011110 110 .. ..... ..... ..... 010100 @3r +ILVOD 011110 111 .. ..... ..... ..... 010100 @3r + +VSHF 011110 000 .. ..... ..... ..... 010101 @3r +SRAR 011110 001 .. ..... ..... ..... 010101 @3r +SRLR 011110 010 .. ..... ..... ..... 010101 @3r +HADD_S 011110 100.. ..... ..... ..... 010101 @3r +HADD_U 011110 101.. ..... ..... ..... 010101 @3r +HSUB_S 011110 110.. ..... ..... ..... 010101 @3r +HSUB_U 011110 111.. ..... ..... ..... 010101 @3r + +{ + CTCMSA 011110 0000111110 ..... ..... 011001 @elm + SLDI 011110 0000 ...... ..... ..... 011001 @elm_df +} +{ + CFCMSA 011110 0001111110 ..... ..... 011001 @elm + SPLATI 011110 0001 ...... ..... ..... 011001 @elm_df +} +{ + MOVE_V 011110 0010111110 ..... ..... 011001 @elm + COPY_S 011110 0010 ...... ..... ..... 011001 @elm_df +} +COPY_U 011110 0011 ...... ..... ..... 011001 @elm_df +INSERT 011110 0100 ...... ..... ..... 011001 @elm_df +INSVE 011110 0101 ...... ..... ..... 011001 @elm_df + +FCAF 011110 0000 . ..... ..... ..... 011010 @3rf_w +FCUN 011110 0001 . ..... ..... ..... 011010 @3rf_w +FCEQ 011110 0010 . ..... ..... ..... 011010 @3rf_w +FCUEQ 011110 0011 . ..... ..... ..... 011010 @3rf_w +FCLT 011110 0100 . ..... ..... ..... 011010 @3rf_w +FCULT 011110 0101 . ..... ..... ..... 011010 @3rf_w +FCLE 011110 0110 . ..... ..... ..... 011010 @3rf_w +FCULE 011110 0111 . ..... ..... ..... 011010 @3rf_w +FSAF 011110 1000 . ..... ..... ..... 011010 @3rf_w +FSUN 011110 1001 . ..... ..... ..... 011010 @3rf_w +FSEQ 011110 1010 . ..... ..... ..... 011010 @3rf_w +FSUEQ 011110 1011 . ..... ..... ..... 011010 @3rf_w +FSLT 011110 1100 . ..... ..... ..... 
011010 @3rf_w +FSULT 011110 1101 . ..... ..... ..... 011010 @3rf_w +FSLE 011110 1110 . ..... ..... ..... 011010 @3rf_w +FSULE 011110 1111 . ..... ..... ..... 011010 @3rf_w + +FADD 011110 0000 . ..... ..... ..... 011011 @3rf_w +FSUB 011110 0001 . ..... ..... ..... 011011 @3rf_w +FMUL 011110 0010 . ..... ..... ..... 011011 @3rf_w +FDIV 011110 0011 . ..... ..... ..... 011011 @3rf_w +FMADD 011110 0100 . ..... ..... ..... 011011 @3rf_w +FMSUB 011110 0101 . ..... ..... ..... 011011 @3rf_w +FEXP2 011110 0111 . ..... ..... ..... 011011 @3rf_w +FEXDO 011110 1000 . ..... ..... ..... 011011 @3rf_w +FTQ 011110 1010 . ..... ..... ..... 011011 @3rf_w +FMIN 011110 1100 . ..... ..... ..... 011011 @3rf_w +FMIN_A 011110 1101 . ..... ..... ..... 011011 @3rf_w +FMAX 011110 1110 . ..... ..... ..... 011011 @3rf_w +FMAX_A 011110 1111 . ..... ..... ..... 011011 @3rf_w + +FCOR 011110 0001 . ..... ..... ..... 011100 @3rf_w +FCUNE 011110 0010 . ..... ..... ..... 011100 @3rf_w +FCNE 011110 0011 . ..... ..... ..... 011100 @3rf_w +MUL_Q 011110 0100 . ..... ..... ..... 011100 @3rf_h +MADD_Q 011110 0101 . ..... ..... ..... 011100 @3rf_h +MSUB_Q 011110 0110 . ..... ..... ..... 011100 @3rf_h +FSOR 011110 1001 . ..... ..... ..... 011100 @3rf_w +FSUNE 011110 1010 . ..... ..... ..... 011100 @3rf_w +FSNE 011110 1011 . ..... ..... ..... 011100 @3rf_w +MULR_Q 011110 1100 . ..... ..... ..... 011100 @3rf_h +MADDR_Q 011110 1101 . ..... ..... ..... 011100 @3rf_h +MSUBR_Q 011110 1110 . ..... ..... ..... 011100 @3rf_h + +AND_V 011110 00000 ..... ..... ..... 011110 @vec +OR_V 011110 00001 ..... ..... ..... 011110 @vec +NOR_V 011110 00010 ..... ..... ..... 011110 @vec +XOR_V 011110 00011 ..... ..... ..... 011110 @vec +BMNZ_V 011110 00100 ..... ..... ..... 011110 @vec +BMZ_V 011110 00101 ..... ..... ..... 011110 @vec +BSEL_V 011110 00110 ..... ..... ..... 011110 @vec +FILL 011110 11000000 .. ..... ..... 011110 @2r +PCNT 011110 11000001 .. ..... ..... 011110 @2r +NLOC 011110 11000010 .. ..... ..... 011110 @2r +NLZC 011110 11000011 .. ..... ..... 011110 @2r +FCLASS 011110 110010000 . ..... ..... 011110 @2rf +FTRUNC_S 011110 110010001 . ..... ..... 011110 @2rf +FTRUNC_U 011110 110010010 . ..... ..... 011110 @2rf +FSQRT 011110 110010011 . ..... ..... 011110 @2rf +FRSQRT 011110 110010100 . ..... ..... 011110 @2rf +FRCP 011110 110010101 . ..... ..... 011110 @2rf +FRINT 011110 110010110 . ..... ..... 011110 @2rf +FLOG2 011110 110010111 . ..... ..... 011110 @2rf +FEXUPL 011110 110011000 . ..... ..... 011110 @2rf +FEXUPR 011110 110011001 . ..... ..... 011110 @2rf +FFQL 011110 110011010 . ..... ..... 011110 @2rf +FFQR 011110 110011011 . ..... ..... 011110 @2rf +FTINT_S 011110 110011100 . ..... ..... 011110 @2rf +FTINT_U 011110 110011101 . ..... ..... 011110 @2rf +FFINT_S 011110 110011110 . ..... ..... 011110 @2rf +FFINT_U 011110 110011111 . ..... ..... 011110 @2rf + +LD 011110 .......... ..... ..... 1000 .. @ldst +ST 011110 .......... ..... ..... 1001 .. 
@ldst diff --git a/target/mips/tcg/msa_helper.c b/target/mips/tcg/msa_helper.c index 04af54f66d..5667b1f0a1 100644 --- a/target/mips/tcg/msa_helper.c +++ b/target/mips/tcg/msa_helper.c @@ -3231,22 +3231,22 @@ void helper_msa_maddv_b(CPUMIPSState *env, wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); - pwd->b[0] = msa_maddv_df(DF_BYTE, pwt->b[0], pws->b[0], pwt->b[0]); - pwd->b[1] = msa_maddv_df(DF_BYTE, pwt->b[1], pws->b[1], pwt->b[1]); - pwd->b[2] = msa_maddv_df(DF_BYTE, pwt->b[2], pws->b[2], pwt->b[2]); - pwd->b[3] = msa_maddv_df(DF_BYTE, pwt->b[3], pws->b[3], pwt->b[3]); - pwd->b[4] = msa_maddv_df(DF_BYTE, pwt->b[4], pws->b[4], pwt->b[4]); - pwd->b[5] = msa_maddv_df(DF_BYTE, pwt->b[5], pws->b[5], pwt->b[5]); - pwd->b[6] = msa_maddv_df(DF_BYTE, pwt->b[6], pws->b[6], pwt->b[6]); - pwd->b[7] = msa_maddv_df(DF_BYTE, pwt->b[7], pws->b[7], pwt->b[7]); - pwd->b[8] = msa_maddv_df(DF_BYTE, pwt->b[8], pws->b[8], pwt->b[8]); - pwd->b[9] = msa_maddv_df(DF_BYTE, pwt->b[9], pws->b[9], pwt->b[9]); - pwd->b[10] = msa_maddv_df(DF_BYTE, pwt->b[10], pws->b[10], pwt->b[10]); - pwd->b[11] = msa_maddv_df(DF_BYTE, pwt->b[11], pws->b[11], pwt->b[11]); - pwd->b[12] = msa_maddv_df(DF_BYTE, pwt->b[12], pws->b[12], pwt->b[12]); - pwd->b[13] = msa_maddv_df(DF_BYTE, pwt->b[13], pws->b[13], pwt->b[13]); - pwd->b[14] = msa_maddv_df(DF_BYTE, pwt->b[14], pws->b[14], pwt->b[14]); - pwd->b[15] = msa_maddv_df(DF_BYTE, pwt->b[15], pws->b[15], pwt->b[15]); + pwd->b[0] = msa_maddv_df(DF_BYTE, pwd->b[0], pws->b[0], pwt->b[0]); + pwd->b[1] = msa_maddv_df(DF_BYTE, pwd->b[1], pws->b[1], pwt->b[1]); + pwd->b[2] = msa_maddv_df(DF_BYTE, pwd->b[2], pws->b[2], pwt->b[2]); + pwd->b[3] = msa_maddv_df(DF_BYTE, pwd->b[3], pws->b[3], pwt->b[3]); + pwd->b[4] = msa_maddv_df(DF_BYTE, pwd->b[4], pws->b[4], pwt->b[4]); + pwd->b[5] = msa_maddv_df(DF_BYTE, pwd->b[5], pws->b[5], pwt->b[5]); + pwd->b[6] = msa_maddv_df(DF_BYTE, pwd->b[6], pws->b[6], pwt->b[6]); + pwd->b[7] = msa_maddv_df(DF_BYTE, pwd->b[7], pws->b[7], pwt->b[7]); + pwd->b[8] = msa_maddv_df(DF_BYTE, pwd->b[8], pws->b[8], pwt->b[8]); + pwd->b[9] = msa_maddv_df(DF_BYTE, pwd->b[9], pws->b[9], pwt->b[9]); + pwd->b[10] = msa_maddv_df(DF_BYTE, pwd->b[10], pws->b[10], pwt->b[10]); + pwd->b[11] = msa_maddv_df(DF_BYTE, pwd->b[11], pws->b[11], pwt->b[11]); + pwd->b[12] = msa_maddv_df(DF_BYTE, pwd->b[12], pws->b[12], pwt->b[12]); + pwd->b[13] = msa_maddv_df(DF_BYTE, pwd->b[13], pws->b[13], pwt->b[13]); + pwd->b[14] = msa_maddv_df(DF_BYTE, pwd->b[14], pws->b[14], pwt->b[14]); + pwd->b[15] = msa_maddv_df(DF_BYTE, pwd->b[15], pws->b[15], pwt->b[15]); } void helper_msa_maddv_h(CPUMIPSState *env, @@ -3303,22 +3303,22 @@ void helper_msa_msubv_b(CPUMIPSState *env, wr_t *pws = &(env->active_fpu.fpr[ws].wr); wr_t *pwt = &(env->active_fpu.fpr[wt].wr); - pwd->b[0] = msa_msubv_df(DF_BYTE, pwt->b[0], pws->b[0], pwt->b[0]); - pwd->b[1] = msa_msubv_df(DF_BYTE, pwt->b[1], pws->b[1], pwt->b[1]); - pwd->b[2] = msa_msubv_df(DF_BYTE, pwt->b[2], pws->b[2], pwt->b[2]); - pwd->b[3] = msa_msubv_df(DF_BYTE, pwt->b[3], pws->b[3], pwt->b[3]); - pwd->b[4] = msa_msubv_df(DF_BYTE, pwt->b[4], pws->b[4], pwt->b[4]); - pwd->b[5] = msa_msubv_df(DF_BYTE, pwt->b[5], pws->b[5], pwt->b[5]); - pwd->b[6] = msa_msubv_df(DF_BYTE, pwt->b[6], pws->b[6], pwt->b[6]); - pwd->b[7] = msa_msubv_df(DF_BYTE, pwt->b[7], pws->b[7], pwt->b[7]); - pwd->b[8] = msa_msubv_df(DF_BYTE, pwt->b[8], pws->b[8], pwt->b[8]); - pwd->b[9] = msa_msubv_df(DF_BYTE, pwt->b[9], pws->b[9], pwt->b[9]); - pwd->b[10] = 
msa_msubv_df(DF_BYTE, pwt->b[10], pws->b[10], pwt->b[10]); - pwd->b[11] = msa_msubv_df(DF_BYTE, pwt->b[11], pws->b[11], pwt->b[11]); - pwd->b[12] = msa_msubv_df(DF_BYTE, pwt->b[12], pws->b[12], pwt->b[12]); - pwd->b[13] = msa_msubv_df(DF_BYTE, pwt->b[13], pws->b[13], pwt->b[13]); - pwd->b[14] = msa_msubv_df(DF_BYTE, pwt->b[14], pws->b[14], pwt->b[14]); - pwd->b[15] = msa_msubv_df(DF_BYTE, pwt->b[15], pws->b[15], pwt->b[15]); + pwd->b[0] = msa_msubv_df(DF_BYTE, pwd->b[0], pws->b[0], pwt->b[0]); + pwd->b[1] = msa_msubv_df(DF_BYTE, pwd->b[1], pws->b[1], pwt->b[1]); + pwd->b[2] = msa_msubv_df(DF_BYTE, pwd->b[2], pws->b[2], pwt->b[2]); + pwd->b[3] = msa_msubv_df(DF_BYTE, pwd->b[3], pws->b[3], pwt->b[3]); + pwd->b[4] = msa_msubv_df(DF_BYTE, pwd->b[4], pws->b[4], pwt->b[4]); + pwd->b[5] = msa_msubv_df(DF_BYTE, pwd->b[5], pws->b[5], pwt->b[5]); + pwd->b[6] = msa_msubv_df(DF_BYTE, pwd->b[6], pws->b[6], pwt->b[6]); + pwd->b[7] = msa_msubv_df(DF_BYTE, pwd->b[7], pws->b[7], pwt->b[7]); + pwd->b[8] = msa_msubv_df(DF_BYTE, pwd->b[8], pws->b[8], pwt->b[8]); + pwd->b[9] = msa_msubv_df(DF_BYTE, pwd->b[9], pws->b[9], pwt->b[9]); + pwd->b[10] = msa_msubv_df(DF_BYTE, pwd->b[10], pws->b[10], pwt->b[10]); + pwd->b[11] = msa_msubv_df(DF_BYTE, pwd->b[11], pws->b[11], pwt->b[11]); + pwd->b[12] = msa_msubv_df(DF_BYTE, pwd->b[12], pws->b[12], pwt->b[12]); + pwd->b[13] = msa_msubv_df(DF_BYTE, pwd->b[13], pws->b[13], pwt->b[13]); + pwd->b[14] = msa_msubv_df(DF_BYTE, pwd->b[14], pws->b[14], pwt->b[14]); + pwd->b[15] = msa_msubv_df(DF_BYTE, pwd->b[15], pws->b[15], pwt->b[15]); } void helper_msa_msubv_h(CPUMIPSState *env, @@ -8211,185 +8211,93 @@ void helper_msa_ffint_u_df(CPUMIPSState *env, uint32_t df, uint32_t wd, #define DF_ELEMENTS(df) (MSA_WRLEN / DF_BITS(df)) #if !defined(CONFIG_USER_ONLY) -#define MEMOP_IDX(DF) \ - TCGMemOpIdx oi = make_memop_idx(MO_TE | DF | MO_UNALN, \ - cpu_mmu_index(env, false)); +#define MEMOP_IDX(DF) \ + MemOpIdx oi = make_memop_idx(MO_TE | DF | MO_UNALN, \ + cpu_mmu_index(env, false)); #else #define MEMOP_IDX(DF) #endif +#ifdef TARGET_WORDS_BIGENDIAN +static inline uint64_t bswap16x4(uint64_t x) +{ + uint64_t m = 0x00ff00ff00ff00ffull; + return ((x & m) << 8) | ((x >> 8) & m); +} + +static inline uint64_t bswap32x2(uint64_t x) +{ + return ror64(bswap64(x), 32); +} +#endif + void helper_msa_ld_b(CPUMIPSState *env, uint32_t wd, target_ulong addr) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); - MEMOP_IDX(DF_BYTE) -#if !defined(CONFIG_USER_ONLY) -#if !defined(HOST_WORDS_BIGENDIAN) - pwd->b[0] = helper_ret_ldub_mmu(env, addr + (0 << DF_BYTE), oi, GETPC()); - pwd->b[1] = helper_ret_ldub_mmu(env, addr + (1 << DF_BYTE), oi, GETPC()); - pwd->b[2] = helper_ret_ldub_mmu(env, addr + (2 << DF_BYTE), oi, GETPC()); - pwd->b[3] = helper_ret_ldub_mmu(env, addr + (3 << DF_BYTE), oi, GETPC()); - pwd->b[4] = helper_ret_ldub_mmu(env, addr + (4 << DF_BYTE), oi, GETPC()); - pwd->b[5] = helper_ret_ldub_mmu(env, addr + (5 << DF_BYTE), oi, GETPC()); - pwd->b[6] = helper_ret_ldub_mmu(env, addr + (6 << DF_BYTE), oi, GETPC()); - pwd->b[7] = helper_ret_ldub_mmu(env, addr + (7 << DF_BYTE), oi, GETPC()); - pwd->b[8] = helper_ret_ldub_mmu(env, addr + (8 << DF_BYTE), oi, GETPC()); - pwd->b[9] = helper_ret_ldub_mmu(env, addr + (9 << DF_BYTE), oi, GETPC()); - pwd->b[10] = helper_ret_ldub_mmu(env, addr + (10 << DF_BYTE), oi, GETPC()); - pwd->b[11] = helper_ret_ldub_mmu(env, addr + (11 << DF_BYTE), oi, GETPC()); - pwd->b[12] = helper_ret_ldub_mmu(env, addr + (12 << DF_BYTE), oi, GETPC()); - pwd->b[13] = helper_ret_ldub_mmu(env, 
addr + (13 << DF_BYTE), oi, GETPC()); - pwd->b[14] = helper_ret_ldub_mmu(env, addr + (14 << DF_BYTE), oi, GETPC()); - pwd->b[15] = helper_ret_ldub_mmu(env, addr + (15 << DF_BYTE), oi, GETPC()); -#else - pwd->b[0] = helper_ret_ldub_mmu(env, addr + (7 << DF_BYTE), oi, GETPC()); - pwd->b[1] = helper_ret_ldub_mmu(env, addr + (6 << DF_BYTE), oi, GETPC()); - pwd->b[2] = helper_ret_ldub_mmu(env, addr + (5 << DF_BYTE), oi, GETPC()); - pwd->b[3] = helper_ret_ldub_mmu(env, addr + (4 << DF_BYTE), oi, GETPC()); - pwd->b[4] = helper_ret_ldub_mmu(env, addr + (3 << DF_BYTE), oi, GETPC()); - pwd->b[5] = helper_ret_ldub_mmu(env, addr + (2 << DF_BYTE), oi, GETPC()); - pwd->b[6] = helper_ret_ldub_mmu(env, addr + (1 << DF_BYTE), oi, GETPC()); - pwd->b[7] = helper_ret_ldub_mmu(env, addr + (0 << DF_BYTE), oi, GETPC()); - pwd->b[8] = helper_ret_ldub_mmu(env, addr + (15 << DF_BYTE), oi, GETPC()); - pwd->b[9] = helper_ret_ldub_mmu(env, addr + (14 << DF_BYTE), oi, GETPC()); - pwd->b[10] = helper_ret_ldub_mmu(env, addr + (13 << DF_BYTE), oi, GETPC()); - pwd->b[11] = helper_ret_ldub_mmu(env, addr + (12 << DF_BYTE), oi, GETPC()); - pwd->b[12] = helper_ret_ldub_mmu(env, addr + (11 << DF_BYTE), oi, GETPC()); - pwd->b[13] = helper_ret_ldub_mmu(env, addr + (10 << DF_BYTE), oi, GETPC()); - pwd->b[14] = helper_ret_ldub_mmu(env, addr + (9 << DF_BYTE), oi, GETPC()); - pwd->b[15] = helper_ret_ldub_mmu(env, addr + (8 << DF_BYTE), oi, GETPC()); -#endif -#else -#if !defined(HOST_WORDS_BIGENDIAN) - pwd->b[0] = cpu_ldub_data(env, addr + (0 << DF_BYTE)); - pwd->b[1] = cpu_ldub_data(env, addr + (1 << DF_BYTE)); - pwd->b[2] = cpu_ldub_data(env, addr + (2 << DF_BYTE)); - pwd->b[3] = cpu_ldub_data(env, addr + (3 << DF_BYTE)); - pwd->b[4] = cpu_ldub_data(env, addr + (4 << DF_BYTE)); - pwd->b[5] = cpu_ldub_data(env, addr + (5 << DF_BYTE)); - pwd->b[6] = cpu_ldub_data(env, addr + (6 << DF_BYTE)); - pwd->b[7] = cpu_ldub_data(env, addr + (7 << DF_BYTE)); - pwd->b[8] = cpu_ldub_data(env, addr + (8 << DF_BYTE)); - pwd->b[9] = cpu_ldub_data(env, addr + (9 << DF_BYTE)); - pwd->b[10] = cpu_ldub_data(env, addr + (10 << DF_BYTE)); - pwd->b[11] = cpu_ldub_data(env, addr + (11 << DF_BYTE)); - pwd->b[12] = cpu_ldub_data(env, addr + (12 << DF_BYTE)); - pwd->b[13] = cpu_ldub_data(env, addr + (13 << DF_BYTE)); - pwd->b[14] = cpu_ldub_data(env, addr + (14 << DF_BYTE)); - pwd->b[15] = cpu_ldub_data(env, addr + (15 << DF_BYTE)); -#else - pwd->b[0] = cpu_ldub_data(env, addr + (7 << DF_BYTE)); - pwd->b[1] = cpu_ldub_data(env, addr + (6 << DF_BYTE)); - pwd->b[2] = cpu_ldub_data(env, addr + (5 << DF_BYTE)); - pwd->b[3] = cpu_ldub_data(env, addr + (4 << DF_BYTE)); - pwd->b[4] = cpu_ldub_data(env, addr + (3 << DF_BYTE)); - pwd->b[5] = cpu_ldub_data(env, addr + (2 << DF_BYTE)); - pwd->b[6] = cpu_ldub_data(env, addr + (1 << DF_BYTE)); - pwd->b[7] = cpu_ldub_data(env, addr + (0 << DF_BYTE)); - pwd->b[8] = cpu_ldub_data(env, addr + (15 << DF_BYTE)); - pwd->b[9] = cpu_ldub_data(env, addr + (14 << DF_BYTE)); - pwd->b[10] = cpu_ldub_data(env, addr + (13 << DF_BYTE)); - pwd->b[11] = cpu_ldub_data(env, addr + (12 << DF_BYTE)); - pwd->b[12] = cpu_ldub_data(env, addr + (11 << DF_BYTE)); - pwd->b[13] = cpu_ldub_data(env, addr + (10 << DF_BYTE)); - pwd->b[14] = cpu_ldub_data(env, addr + (9 << DF_BYTE)); - pwd->b[15] = cpu_ldub_data(env, addr + (8 << DF_BYTE)); -#endif -#endif + uintptr_t ra = GETPC(); + uint64_t d0, d1; + + /* Load 8 bytes at a time. Vector element ordering makes this LE. 
*/ + d0 = cpu_ldq_le_data_ra(env, addr + 0, ra); + d1 = cpu_ldq_le_data_ra(env, addr + 8, ra); + pwd->d[0] = d0; + pwd->d[1] = d1; } void helper_msa_ld_h(CPUMIPSState *env, uint32_t wd, target_ulong addr) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); - MEMOP_IDX(DF_HALF) -#if !defined(CONFIG_USER_ONLY) -#if !defined(HOST_WORDS_BIGENDIAN) - pwd->h[0] = helper_ret_lduw_mmu(env, addr + (0 << DF_HALF), oi, GETPC()); - pwd->h[1] = helper_ret_lduw_mmu(env, addr + (1 << DF_HALF), oi, GETPC()); - pwd->h[2] = helper_ret_lduw_mmu(env, addr + (2 << DF_HALF), oi, GETPC()); - pwd->h[3] = helper_ret_lduw_mmu(env, addr + (3 << DF_HALF), oi, GETPC()); - pwd->h[4] = helper_ret_lduw_mmu(env, addr + (4 << DF_HALF), oi, GETPC()); - pwd->h[5] = helper_ret_lduw_mmu(env, addr + (5 << DF_HALF), oi, GETPC()); - pwd->h[6] = helper_ret_lduw_mmu(env, addr + (6 << DF_HALF), oi, GETPC()); - pwd->h[7] = helper_ret_lduw_mmu(env, addr + (7 << DF_HALF), oi, GETPC()); -#else - pwd->h[0] = helper_ret_lduw_mmu(env, addr + (3 << DF_HALF), oi, GETPC()); - pwd->h[1] = helper_ret_lduw_mmu(env, addr + (2 << DF_HALF), oi, GETPC()); - pwd->h[2] = helper_ret_lduw_mmu(env, addr + (1 << DF_HALF), oi, GETPC()); - pwd->h[3] = helper_ret_lduw_mmu(env, addr + (0 << DF_HALF), oi, GETPC()); - pwd->h[4] = helper_ret_lduw_mmu(env, addr + (7 << DF_HALF), oi, GETPC()); - pwd->h[5] = helper_ret_lduw_mmu(env, addr + (6 << DF_HALF), oi, GETPC()); - pwd->h[6] = helper_ret_lduw_mmu(env, addr + (5 << DF_HALF), oi, GETPC()); - pwd->h[7] = helper_ret_lduw_mmu(env, addr + (4 << DF_HALF), oi, GETPC()); -#endif -#else -#if !defined(HOST_WORDS_BIGENDIAN) - pwd->h[0] = cpu_lduw_data(env, addr + (0 << DF_HALF)); - pwd->h[1] = cpu_lduw_data(env, addr + (1 << DF_HALF)); - pwd->h[2] = cpu_lduw_data(env, addr + (2 << DF_HALF)); - pwd->h[3] = cpu_lduw_data(env, addr + (3 << DF_HALF)); - pwd->h[4] = cpu_lduw_data(env, addr + (4 << DF_HALF)); - pwd->h[5] = cpu_lduw_data(env, addr + (5 << DF_HALF)); - pwd->h[6] = cpu_lduw_data(env, addr + (6 << DF_HALF)); - pwd->h[7] = cpu_lduw_data(env, addr + (7 << DF_HALF)); -#else - pwd->h[0] = cpu_lduw_data(env, addr + (3 << DF_HALF)); - pwd->h[1] = cpu_lduw_data(env, addr + (2 << DF_HALF)); - pwd->h[2] = cpu_lduw_data(env, addr + (1 << DF_HALF)); - pwd->h[3] = cpu_lduw_data(env, addr + (0 << DF_HALF)); - pwd->h[4] = cpu_lduw_data(env, addr + (7 << DF_HALF)); - pwd->h[5] = cpu_lduw_data(env, addr + (6 << DF_HALF)); - pwd->h[6] = cpu_lduw_data(env, addr + (5 << DF_HALF)); - pwd->h[7] = cpu_lduw_data(env, addr + (4 << DF_HALF)); -#endif + uintptr_t ra = GETPC(); + uint64_t d0, d1; + + /* + * Load 8 bytes at a time. Use little-endian load, then for + * big-endian target, we must then swap the four halfwords. 
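/*
 * Illustration only, not part of the patch: the lane-swap helpers introduced
 * above (bswap16x4, bswap32x2) can be sanity-checked against per-element
 * byte swaps.  Stand-alone sketch; assumes GCC/Clang __builtin_bswap*().
 */
#include <assert.h>
#include <stdint.h>
#include <string.h>

static uint64_t bswap16x4(uint64_t x)
{
    uint64_t m = 0x00ff00ff00ff00ffull;
    return ((x & m) << 8) | ((x >> 8) & m);
}

static uint64_t bswap32x2(uint64_t x)
{
    uint64_t b = __builtin_bswap64(x);   /* ror64(bswap64(x), 32) */
    return (b >> 32) | (b << 32);
}

int main(void)
{
    uint64_t x = 0x0123456789abcdefull, h64, w64;
    uint16_t h[4];
    uint32_t w[2];

    memcpy(h, &x, sizeof(x));
    memcpy(w, &x, sizeof(x));
    for (int i = 0; i < 4; i++) {
        h[i] = __builtin_bswap16(h[i]);  /* swap each halfword in place */
    }
    for (int i = 0; i < 2; i++) {
        w[i] = __builtin_bswap32(w[i]);  /* swap each word in place */
    }
    memcpy(&h64, h, sizeof(h64));
    memcpy(&w64, w, sizeof(w64));

    assert(bswap16x4(x) == h64);
    assert(bswap32x2(x) == w64);
    return 0;
}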
+ */ + d0 = cpu_ldq_le_data_ra(env, addr + 0, ra); + d1 = cpu_ldq_le_data_ra(env, addr + 8, ra); +#ifdef TARGET_WORDS_BIGENDIAN + d0 = bswap16x4(d0); + d1 = bswap16x4(d1); #endif + pwd->d[0] = d0; + pwd->d[1] = d1; } void helper_msa_ld_w(CPUMIPSState *env, uint32_t wd, target_ulong addr) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); - MEMOP_IDX(DF_WORD) -#if !defined(CONFIG_USER_ONLY) -#if !defined(HOST_WORDS_BIGENDIAN) - pwd->w[0] = helper_ret_ldul_mmu(env, addr + (0 << DF_WORD), oi, GETPC()); - pwd->w[1] = helper_ret_ldul_mmu(env, addr + (1 << DF_WORD), oi, GETPC()); - pwd->w[2] = helper_ret_ldul_mmu(env, addr + (2 << DF_WORD), oi, GETPC()); - pwd->w[3] = helper_ret_ldul_mmu(env, addr + (3 << DF_WORD), oi, GETPC()); -#else - pwd->w[0] = helper_ret_ldul_mmu(env, addr + (1 << DF_WORD), oi, GETPC()); - pwd->w[1] = helper_ret_ldul_mmu(env, addr + (0 << DF_WORD), oi, GETPC()); - pwd->w[2] = helper_ret_ldul_mmu(env, addr + (3 << DF_WORD), oi, GETPC()); - pwd->w[3] = helper_ret_ldul_mmu(env, addr + (2 << DF_WORD), oi, GETPC()); -#endif -#else -#if !defined(HOST_WORDS_BIGENDIAN) - pwd->w[0] = cpu_ldl_data(env, addr + (0 << DF_WORD)); - pwd->w[1] = cpu_ldl_data(env, addr + (1 << DF_WORD)); - pwd->w[2] = cpu_ldl_data(env, addr + (2 << DF_WORD)); - pwd->w[3] = cpu_ldl_data(env, addr + (3 << DF_WORD)); -#else - pwd->w[0] = cpu_ldl_data(env, addr + (1 << DF_WORD)); - pwd->w[1] = cpu_ldl_data(env, addr + (0 << DF_WORD)); - pwd->w[2] = cpu_ldl_data(env, addr + (3 << DF_WORD)); - pwd->w[3] = cpu_ldl_data(env, addr + (2 << DF_WORD)); -#endif + uintptr_t ra = GETPC(); + uint64_t d0, d1; + + /* + * Load 8 bytes at a time. Use little-endian load, then for + * big-endian target, we must then bswap the two words. + */ + d0 = cpu_ldq_le_data_ra(env, addr + 0, ra); + d1 = cpu_ldq_le_data_ra(env, addr + 8, ra); +#ifdef TARGET_WORDS_BIGENDIAN + d0 = bswap32x2(d0); + d1 = bswap32x2(d1); #endif + pwd->d[0] = d0; + pwd->d[1] = d1; } void helper_msa_ld_d(CPUMIPSState *env, uint32_t wd, target_ulong addr) { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); - MEMOP_IDX(DF_DOUBLE) -#if !defined(CONFIG_USER_ONLY) - pwd->d[0] = helper_ret_ldq_mmu(env, addr + (0 << DF_DOUBLE), oi, GETPC()); - pwd->d[1] = helper_ret_ldq_mmu(env, addr + (1 << DF_DOUBLE), oi, GETPC()); -#else - pwd->d[0] = cpu_ldq_data(env, addr + (0 << DF_DOUBLE)); - pwd->d[1] = cpu_ldq_data(env, addr + (1 << DF_DOUBLE)); -#endif + uintptr_t ra = GETPC(); + uint64_t d0, d1; + + d0 = cpu_ldq_data_ra(env, addr + 0, ra); + d1 = cpu_ldq_data_ra(env, addr + 8, ra); + pwd->d[0] = d0; + pwd->d[1] = d1; } #define MSA_PAGESPAN(x) \ @@ -8415,82 +8323,13 @@ void helper_msa_st_b(CPUMIPSState *env, uint32_t wd, { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); int mmu_idx = cpu_mmu_index(env, false); + uintptr_t ra = GETPC(); - MEMOP_IDX(DF_BYTE) - ensure_writable_pages(env, addr, mmu_idx, GETPC()); -#if !defined(CONFIG_USER_ONLY) -#if !defined(HOST_WORDS_BIGENDIAN) - helper_ret_stb_mmu(env, addr + (0 << DF_BYTE), pwd->b[0], oi, GETPC()); - helper_ret_stb_mmu(env, addr + (1 << DF_BYTE), pwd->b[1], oi, GETPC()); - helper_ret_stb_mmu(env, addr + (2 << DF_BYTE), pwd->b[2], oi, GETPC()); - helper_ret_stb_mmu(env, addr + (3 << DF_BYTE), pwd->b[3], oi, GETPC()); - helper_ret_stb_mmu(env, addr + (4 << DF_BYTE), pwd->b[4], oi, GETPC()); - helper_ret_stb_mmu(env, addr + (5 << DF_BYTE), pwd->b[5], oi, GETPC()); - helper_ret_stb_mmu(env, addr + (6 << DF_BYTE), pwd->b[6], oi, GETPC()); - helper_ret_stb_mmu(env, addr + (7 << DF_BYTE), pwd->b[7], oi, GETPC()); - helper_ret_stb_mmu(env, addr + (8 
<< DF_BYTE), pwd->b[8], oi, GETPC()); - helper_ret_stb_mmu(env, addr + (9 << DF_BYTE), pwd->b[9], oi, GETPC()); - helper_ret_stb_mmu(env, addr + (10 << DF_BYTE), pwd->b[10], oi, GETPC()); - helper_ret_stb_mmu(env, addr + (11 << DF_BYTE), pwd->b[11], oi, GETPC()); - helper_ret_stb_mmu(env, addr + (12 << DF_BYTE), pwd->b[12], oi, GETPC()); - helper_ret_stb_mmu(env, addr + (13 << DF_BYTE), pwd->b[13], oi, GETPC()); - helper_ret_stb_mmu(env, addr + (14 << DF_BYTE), pwd->b[14], oi, GETPC()); - helper_ret_stb_mmu(env, addr + (15 << DF_BYTE), pwd->b[15], oi, GETPC()); -#else - helper_ret_stb_mmu(env, addr + (7 << DF_BYTE), pwd->b[0], oi, GETPC()); - helper_ret_stb_mmu(env, addr + (6 << DF_BYTE), pwd->b[1], oi, GETPC()); - helper_ret_stb_mmu(env, addr + (5 << DF_BYTE), pwd->b[2], oi, GETPC()); - helper_ret_stb_mmu(env, addr + (4 << DF_BYTE), pwd->b[3], oi, GETPC()); - helper_ret_stb_mmu(env, addr + (3 << DF_BYTE), pwd->b[4], oi, GETPC()); - helper_ret_stb_mmu(env, addr + (2 << DF_BYTE), pwd->b[5], oi, GETPC()); - helper_ret_stb_mmu(env, addr + (1 << DF_BYTE), pwd->b[6], oi, GETPC()); - helper_ret_stb_mmu(env, addr + (0 << DF_BYTE), pwd->b[7], oi, GETPC()); - helper_ret_stb_mmu(env, addr + (15 << DF_BYTE), pwd->b[8], oi, GETPC()); - helper_ret_stb_mmu(env, addr + (14 << DF_BYTE), pwd->b[9], oi, GETPC()); - helper_ret_stb_mmu(env, addr + (13 << DF_BYTE), pwd->b[10], oi, GETPC()); - helper_ret_stb_mmu(env, addr + (12 << DF_BYTE), pwd->b[11], oi, GETPC()); - helper_ret_stb_mmu(env, addr + (11 << DF_BYTE), pwd->b[12], oi, GETPC()); - helper_ret_stb_mmu(env, addr + (10 << DF_BYTE), pwd->b[13], oi, GETPC()); - helper_ret_stb_mmu(env, addr + (9 << DF_BYTE), pwd->b[14], oi, GETPC()); - helper_ret_stb_mmu(env, addr + (8 << DF_BYTE), pwd->b[15], oi, GETPC()); -#endif -#else -#if !defined(HOST_WORDS_BIGENDIAN) - cpu_stb_data(env, addr + (0 << DF_BYTE), pwd->b[0]); - cpu_stb_data(env, addr + (1 << DF_BYTE), pwd->b[1]); - cpu_stb_data(env, addr + (2 << DF_BYTE), pwd->b[2]); - cpu_stb_data(env, addr + (3 << DF_BYTE), pwd->b[3]); - cpu_stb_data(env, addr + (4 << DF_BYTE), pwd->b[4]); - cpu_stb_data(env, addr + (5 << DF_BYTE), pwd->b[5]); - cpu_stb_data(env, addr + (6 << DF_BYTE), pwd->b[6]); - cpu_stb_data(env, addr + (7 << DF_BYTE), pwd->b[7]); - cpu_stb_data(env, addr + (8 << DF_BYTE), pwd->b[8]); - cpu_stb_data(env, addr + (9 << DF_BYTE), pwd->b[9]); - cpu_stb_data(env, addr + (10 << DF_BYTE), pwd->b[10]); - cpu_stb_data(env, addr + (11 << DF_BYTE), pwd->b[11]); - cpu_stb_data(env, addr + (12 << DF_BYTE), pwd->b[12]); - cpu_stb_data(env, addr + (13 << DF_BYTE), pwd->b[13]); - cpu_stb_data(env, addr + (14 << DF_BYTE), pwd->b[14]); - cpu_stb_data(env, addr + (15 << DF_BYTE), pwd->b[15]); -#else - cpu_stb_data(env, addr + (7 << DF_BYTE), pwd->b[0]); - cpu_stb_data(env, addr + (6 << DF_BYTE), pwd->b[1]); - cpu_stb_data(env, addr + (5 << DF_BYTE), pwd->b[2]); - cpu_stb_data(env, addr + (4 << DF_BYTE), pwd->b[3]); - cpu_stb_data(env, addr + (3 << DF_BYTE), pwd->b[4]); - cpu_stb_data(env, addr + (2 << DF_BYTE), pwd->b[5]); - cpu_stb_data(env, addr + (1 << DF_BYTE), pwd->b[6]); - cpu_stb_data(env, addr + (0 << DF_BYTE), pwd->b[7]); - cpu_stb_data(env, addr + (15 << DF_BYTE), pwd->b[8]); - cpu_stb_data(env, addr + (14 << DF_BYTE), pwd->b[9]); - cpu_stb_data(env, addr + (13 << DF_BYTE), pwd->b[10]); - cpu_stb_data(env, addr + (12 << DF_BYTE), pwd->b[11]); - cpu_stb_data(env, addr + (11 << DF_BYTE), pwd->b[12]); - cpu_stb_data(env, addr + (10 << DF_BYTE), pwd->b[13]); - cpu_stb_data(env, addr + (9 << DF_BYTE), 
pwd->b[14]); - cpu_stb_data(env, addr + (8 << DF_BYTE), pwd->b[15]); -#endif -#endif + ensure_writable_pages(env, addr, mmu_idx, ra); + + /* Store 8 bytes at a time. Vector element ordering makes this LE. */ + cpu_stq_le_data_ra(env, addr + 0, pwd->d[0], ra); + cpu_stq_le_data_ra(env, addr + 8, pwd->d[1], ra); } void helper_msa_st_h(CPUMIPSState *env, uint32_t wd, @@ -8498,50 +8337,20 @@ void helper_msa_st_h(CPUMIPSState *env, uint32_t wd, { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); int mmu_idx = cpu_mmu_index(env, false); + uintptr_t ra = GETPC(); + uint64_t d0, d1; - MEMOP_IDX(DF_HALF) - ensure_writable_pages(env, addr, mmu_idx, GETPC()); -#if !defined(CONFIG_USER_ONLY) -#if !defined(HOST_WORDS_BIGENDIAN) - helper_ret_stw_mmu(env, addr + (0 << DF_HALF), pwd->h[0], oi, GETPC()); - helper_ret_stw_mmu(env, addr + (1 << DF_HALF), pwd->h[1], oi, GETPC()); - helper_ret_stw_mmu(env, addr + (2 << DF_HALF), pwd->h[2], oi, GETPC()); - helper_ret_stw_mmu(env, addr + (3 << DF_HALF), pwd->h[3], oi, GETPC()); - helper_ret_stw_mmu(env, addr + (4 << DF_HALF), pwd->h[4], oi, GETPC()); - helper_ret_stw_mmu(env, addr + (5 << DF_HALF), pwd->h[5], oi, GETPC()); - helper_ret_stw_mmu(env, addr + (6 << DF_HALF), pwd->h[6], oi, GETPC()); - helper_ret_stw_mmu(env, addr + (7 << DF_HALF), pwd->h[7], oi, GETPC()); -#else - helper_ret_stw_mmu(env, addr + (3 << DF_HALF), pwd->h[0], oi, GETPC()); - helper_ret_stw_mmu(env, addr + (2 << DF_HALF), pwd->h[1], oi, GETPC()); - helper_ret_stw_mmu(env, addr + (1 << DF_HALF), pwd->h[2], oi, GETPC()); - helper_ret_stw_mmu(env, addr + (0 << DF_HALF), pwd->h[3], oi, GETPC()); - helper_ret_stw_mmu(env, addr + (7 << DF_HALF), pwd->h[4], oi, GETPC()); - helper_ret_stw_mmu(env, addr + (6 << DF_HALF), pwd->h[5], oi, GETPC()); - helper_ret_stw_mmu(env, addr + (5 << DF_HALF), pwd->h[6], oi, GETPC()); - helper_ret_stw_mmu(env, addr + (4 << DF_HALF), pwd->h[7], oi, GETPC()); -#endif -#else -#if !defined(HOST_WORDS_BIGENDIAN) - cpu_stw_data(env, addr + (0 << DF_HALF), pwd->h[0]); - cpu_stw_data(env, addr + (1 << DF_HALF), pwd->h[1]); - cpu_stw_data(env, addr + (2 << DF_HALF), pwd->h[2]); - cpu_stw_data(env, addr + (3 << DF_HALF), pwd->h[3]); - cpu_stw_data(env, addr + (4 << DF_HALF), pwd->h[4]); - cpu_stw_data(env, addr + (5 << DF_HALF), pwd->h[5]); - cpu_stw_data(env, addr + (6 << DF_HALF), pwd->h[6]); - cpu_stw_data(env, addr + (7 << DF_HALF), pwd->h[7]); -#else - cpu_stw_data(env, addr + (3 << DF_HALF), pwd->h[0]); - cpu_stw_data(env, addr + (2 << DF_HALF), pwd->h[1]); - cpu_stw_data(env, addr + (1 << DF_HALF), pwd->h[2]); - cpu_stw_data(env, addr + (0 << DF_HALF), pwd->h[3]); - cpu_stw_data(env, addr + (7 << DF_HALF), pwd->h[4]); - cpu_stw_data(env, addr + (6 << DF_HALF), pwd->h[5]); - cpu_stw_data(env, addr + (5 << DF_HALF), pwd->h[6]); - cpu_stw_data(env, addr + (4 << DF_HALF), pwd->h[7]); -#endif + ensure_writable_pages(env, addr, mmu_idx, ra); + + /* Store 8 bytes at a time. See helper_msa_ld_h.
*/ + d0 = pwd->d[0]; + d1 = pwd->d[1]; +#ifdef TARGET_WORDS_BIGENDIAN + d0 = bswap16x4(d0); + d1 = bswap16x4(d1); #endif + cpu_stq_le_data_ra(env, addr + 0, d0, ra); + cpu_stq_le_data_ra(env, addr + 8, d1, ra); } void helper_msa_st_w(CPUMIPSState *env, uint32_t wd, @@ -8549,34 +8358,20 @@ void helper_msa_st_w(CPUMIPSState *env, uint32_t wd, { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); int mmu_idx = cpu_mmu_index(env, false); + uintptr_t ra = GETPC(); + uint64_t d0, d1; - MEMOP_IDX(DF_WORD) - ensure_writable_pages(env, addr, mmu_idx, GETPC()); -#if !defined(CONFIG_USER_ONLY) -#if !defined(HOST_WORDS_BIGENDIAN) - helper_ret_stl_mmu(env, addr + (0 << DF_WORD), pwd->w[0], oi, GETPC()); - helper_ret_stl_mmu(env, addr + (1 << DF_WORD), pwd->w[1], oi, GETPC()); - helper_ret_stl_mmu(env, addr + (2 << DF_WORD), pwd->w[2], oi, GETPC()); - helper_ret_stl_mmu(env, addr + (3 << DF_WORD), pwd->w[3], oi, GETPC()); -#else - helper_ret_stl_mmu(env, addr + (1 << DF_WORD), pwd->w[0], oi, GETPC()); - helper_ret_stl_mmu(env, addr + (0 << DF_WORD), pwd->w[1], oi, GETPC()); - helper_ret_stl_mmu(env, addr + (3 << DF_WORD), pwd->w[2], oi, GETPC()); - helper_ret_stl_mmu(env, addr + (2 << DF_WORD), pwd->w[3], oi, GETPC()); -#endif -#else -#if !defined(HOST_WORDS_BIGENDIAN) - cpu_stl_data(env, addr + (0 << DF_WORD), pwd->w[0]); - cpu_stl_data(env, addr + (1 << DF_WORD), pwd->w[1]); - cpu_stl_data(env, addr + (2 << DF_WORD), pwd->w[2]); - cpu_stl_data(env, addr + (3 << DF_WORD), pwd->w[3]); -#else - cpu_stl_data(env, addr + (1 << DF_WORD), pwd->w[0]); - cpu_stl_data(env, addr + (0 << DF_WORD), pwd->w[1]); - cpu_stl_data(env, addr + (3 << DF_WORD), pwd->w[2]); - cpu_stl_data(env, addr + (2 << DF_WORD), pwd->w[3]); -#endif + ensure_writable_pages(env, addr, mmu_idx, ra); + + /* Store 8 bytes at a time. See helper_msa_ld_w. */ + d0 = pwd->d[0]; + d1 = pwd->d[1]; +#ifdef TARGET_WORDS_BIGENDIAN + d0 = bswap32x2(d0); + d1 = bswap32x2(d1); #endif + cpu_stq_le_data_ra(env, addr + 0, d0, ra); + cpu_stq_le_data_ra(env, addr + 8, d1, ra); } void helper_msa_st_d(CPUMIPSState *env, uint32_t wd, @@ -8584,14 +8379,10 @@ void helper_msa_st_d(CPUMIPSState *env, uint32_t wd, { wr_t *pwd = &(env->active_fpu.fpr[wd].wr); int mmu_idx = cpu_mmu_index(env, false); + uintptr_t ra = GETPC(); - MEMOP_IDX(DF_DOUBLE) ensure_writable_pages(env, addr, mmu_idx, GETPC()); -#if !defined(CONFIG_USER_ONLY) - helper_ret_stq_mmu(env, addr + (0 << DF_DOUBLE), pwd->d[0], oi, GETPC()); - helper_ret_stq_mmu(env, addr + (1 << DF_DOUBLE), pwd->d[1], oi, GETPC()); -#else - cpu_stq_data(env, addr + (0 << DF_DOUBLE), pwd->d[0]); - cpu_stq_data(env, addr + (1 << DF_DOUBLE), pwd->d[1]); -#endif + + cpu_stq_data_ra(env, addr + 0, pwd->d[0], ra); + cpu_stq_data_ra(env, addr + 8, pwd->d[1], ra); } diff --git a/target/mips/tcg/msa_translate.c b/target/mips/tcg/msa_translate.c index 8170a8df26..7576b3ed86 100644 --- a/target/mips/tcg/msa_translate.c +++ b/target/mips/tcg/msa_translate.c @@ -17,243 +17,24 @@ #include "fpu_helper.h" #include "internal.h" +static int elm_n(DisasContext *ctx, int x); +static int elm_df(DisasContext *ctx, int x); +static int bit_m(DisasContext *ctx, int x); +static int bit_df(DisasContext *ctx, int x); + +static inline int plus_1(DisasContext *s, int x) +{ + return x + 1; +} + +static inline int plus_2(DisasContext *s, int x) +{ + return x + 2; +} + /* Include the auto-generated decoder. 
*/ #include "decode-msa.c.inc" -#define OPC_MSA (0x1E << 26) - -#define MASK_MSA_MINOR(op) (MASK_OP_MAJOR(op) | (op & 0x3F)) -enum { - OPC_MSA_I8_00 = 0x00 | OPC_MSA, - OPC_MSA_I8_01 = 0x01 | OPC_MSA, - OPC_MSA_I8_02 = 0x02 | OPC_MSA, - OPC_MSA_I5_06 = 0x06 | OPC_MSA, - OPC_MSA_I5_07 = 0x07 | OPC_MSA, - OPC_MSA_BIT_09 = 0x09 | OPC_MSA, - OPC_MSA_BIT_0A = 0x0A | OPC_MSA, - OPC_MSA_3R_0D = 0x0D | OPC_MSA, - OPC_MSA_3R_0E = 0x0E | OPC_MSA, - OPC_MSA_3R_0F = 0x0F | OPC_MSA, - OPC_MSA_3R_10 = 0x10 | OPC_MSA, - OPC_MSA_3R_11 = 0x11 | OPC_MSA, - OPC_MSA_3R_12 = 0x12 | OPC_MSA, - OPC_MSA_3R_13 = 0x13 | OPC_MSA, - OPC_MSA_3R_14 = 0x14 | OPC_MSA, - OPC_MSA_3R_15 = 0x15 | OPC_MSA, - OPC_MSA_ELM = 0x19 | OPC_MSA, - OPC_MSA_3RF_1A = 0x1A | OPC_MSA, - OPC_MSA_3RF_1B = 0x1B | OPC_MSA, - OPC_MSA_3RF_1C = 0x1C | OPC_MSA, - OPC_MSA_VEC = 0x1E | OPC_MSA, - - /* MI10 instruction */ - OPC_LD_B = (0x20) | OPC_MSA, - OPC_LD_H = (0x21) | OPC_MSA, - OPC_LD_W = (0x22) | OPC_MSA, - OPC_LD_D = (0x23) | OPC_MSA, - OPC_ST_B = (0x24) | OPC_MSA, - OPC_ST_H = (0x25) | OPC_MSA, - OPC_ST_W = (0x26) | OPC_MSA, - OPC_ST_D = (0x27) | OPC_MSA, -}; - -enum { - /* I5 instruction df(bits 22..21) = _b, _h, _w, _d */ - OPC_ADDVI_df = (0x0 << 23) | OPC_MSA_I5_06, - OPC_CEQI_df = (0x0 << 23) | OPC_MSA_I5_07, - OPC_SUBVI_df = (0x1 << 23) | OPC_MSA_I5_06, - OPC_MAXI_S_df = (0x2 << 23) | OPC_MSA_I5_06, - OPC_CLTI_S_df = (0x2 << 23) | OPC_MSA_I5_07, - OPC_MAXI_U_df = (0x3 << 23) | OPC_MSA_I5_06, - OPC_CLTI_U_df = (0x3 << 23) | OPC_MSA_I5_07, - OPC_MINI_S_df = (0x4 << 23) | OPC_MSA_I5_06, - OPC_CLEI_S_df = (0x4 << 23) | OPC_MSA_I5_07, - OPC_MINI_U_df = (0x5 << 23) | OPC_MSA_I5_06, - OPC_CLEI_U_df = (0x5 << 23) | OPC_MSA_I5_07, - OPC_LDI_df = (0x6 << 23) | OPC_MSA_I5_07, - - /* I8 instruction */ - OPC_ANDI_B = (0x0 << 24) | OPC_MSA_I8_00, - OPC_BMNZI_B = (0x0 << 24) | OPC_MSA_I8_01, - OPC_SHF_B = (0x0 << 24) | OPC_MSA_I8_02, - OPC_ORI_B = (0x1 << 24) | OPC_MSA_I8_00, - OPC_BMZI_B = (0x1 << 24) | OPC_MSA_I8_01, - OPC_SHF_H = (0x1 << 24) | OPC_MSA_I8_02, - OPC_NORI_B = (0x2 << 24) | OPC_MSA_I8_00, - OPC_BSELI_B = (0x2 << 24) | OPC_MSA_I8_01, - OPC_SHF_W = (0x2 << 24) | OPC_MSA_I8_02, - OPC_XORI_B = (0x3 << 24) | OPC_MSA_I8_00, - - /* VEC/2R/2RF instruction */ - OPC_AND_V = (0x00 << 21) | OPC_MSA_VEC, - OPC_OR_V = (0x01 << 21) | OPC_MSA_VEC, - OPC_NOR_V = (0x02 << 21) | OPC_MSA_VEC, - OPC_XOR_V = (0x03 << 21) | OPC_MSA_VEC, - OPC_BMNZ_V = (0x04 << 21) | OPC_MSA_VEC, - OPC_BMZ_V = (0x05 << 21) | OPC_MSA_VEC, - OPC_BSEL_V = (0x06 << 21) | OPC_MSA_VEC, - - OPC_MSA_2R = (0x18 << 21) | OPC_MSA_VEC, - OPC_MSA_2RF = (0x19 << 21) | OPC_MSA_VEC, - - /* 2R instruction df(bits 17..16) = _b, _h, _w, _d */ - OPC_FILL_df = (0x00 << 18) | OPC_MSA_2R, - OPC_PCNT_df = (0x01 << 18) | OPC_MSA_2R, - OPC_NLOC_df = (0x02 << 18) | OPC_MSA_2R, - OPC_NLZC_df = (0x03 << 18) | OPC_MSA_2R, - - /* 2RF instruction df(bit 16) = _w, _d */ - OPC_FCLASS_df = (0x00 << 17) | OPC_MSA_2RF, - OPC_FTRUNC_S_df = (0x01 << 17) | OPC_MSA_2RF, - OPC_FTRUNC_U_df = (0x02 << 17) | OPC_MSA_2RF, - OPC_FSQRT_df = (0x03 << 17) | OPC_MSA_2RF, - OPC_FRSQRT_df = (0x04 << 17) | OPC_MSA_2RF, - OPC_FRCP_df = (0x05 << 17) | OPC_MSA_2RF, - OPC_FRINT_df = (0x06 << 17) | OPC_MSA_2RF, - OPC_FLOG2_df = (0x07 << 17) | OPC_MSA_2RF, - OPC_FEXUPL_df = (0x08 << 17) | OPC_MSA_2RF, - OPC_FEXUPR_df = (0x09 << 17) | OPC_MSA_2RF, - OPC_FFQL_df = (0x0A << 17) | OPC_MSA_2RF, - OPC_FFQR_df = (0x0B << 17) | OPC_MSA_2RF, - OPC_FTINT_S_df = (0x0C << 17) | OPC_MSA_2RF, - OPC_FTINT_U_df = (0x0D << 17) | OPC_MSA_2RF, - 
OPC_FFINT_S_df = (0x0E << 17) | OPC_MSA_2RF, - OPC_FFINT_U_df = (0x0F << 17) | OPC_MSA_2RF, - - /* 3R instruction df(bits 22..21) = _b, _h, _w, d */ - OPC_SLL_df = (0x0 << 23) | OPC_MSA_3R_0D, - OPC_ADDV_df = (0x0 << 23) | OPC_MSA_3R_0E, - OPC_CEQ_df = (0x0 << 23) | OPC_MSA_3R_0F, - OPC_ADD_A_df = (0x0 << 23) | OPC_MSA_3R_10, - OPC_SUBS_S_df = (0x0 << 23) | OPC_MSA_3R_11, - OPC_MULV_df = (0x0 << 23) | OPC_MSA_3R_12, - OPC_DOTP_S_df = (0x0 << 23) | OPC_MSA_3R_13, - OPC_SLD_df = (0x0 << 23) | OPC_MSA_3R_14, - OPC_VSHF_df = (0x0 << 23) | OPC_MSA_3R_15, - OPC_SRA_df = (0x1 << 23) | OPC_MSA_3R_0D, - OPC_SUBV_df = (0x1 << 23) | OPC_MSA_3R_0E, - OPC_ADDS_A_df = (0x1 << 23) | OPC_MSA_3R_10, - OPC_SUBS_U_df = (0x1 << 23) | OPC_MSA_3R_11, - OPC_MADDV_df = (0x1 << 23) | OPC_MSA_3R_12, - OPC_DOTP_U_df = (0x1 << 23) | OPC_MSA_3R_13, - OPC_SPLAT_df = (0x1 << 23) | OPC_MSA_3R_14, - OPC_SRAR_df = (0x1 << 23) | OPC_MSA_3R_15, - OPC_SRL_df = (0x2 << 23) | OPC_MSA_3R_0D, - OPC_MAX_S_df = (0x2 << 23) | OPC_MSA_3R_0E, - OPC_CLT_S_df = (0x2 << 23) | OPC_MSA_3R_0F, - OPC_ADDS_S_df = (0x2 << 23) | OPC_MSA_3R_10, - OPC_SUBSUS_U_df = (0x2 << 23) | OPC_MSA_3R_11, - OPC_MSUBV_df = (0x2 << 23) | OPC_MSA_3R_12, - OPC_DPADD_S_df = (0x2 << 23) | OPC_MSA_3R_13, - OPC_PCKEV_df = (0x2 << 23) | OPC_MSA_3R_14, - OPC_SRLR_df = (0x2 << 23) | OPC_MSA_3R_15, - OPC_BCLR_df = (0x3 << 23) | OPC_MSA_3R_0D, - OPC_MAX_U_df = (0x3 << 23) | OPC_MSA_3R_0E, - OPC_CLT_U_df = (0x3 << 23) | OPC_MSA_3R_0F, - OPC_ADDS_U_df = (0x3 << 23) | OPC_MSA_3R_10, - OPC_SUBSUU_S_df = (0x3 << 23) | OPC_MSA_3R_11, - OPC_DPADD_U_df = (0x3 << 23) | OPC_MSA_3R_13, - OPC_PCKOD_df = (0x3 << 23) | OPC_MSA_3R_14, - OPC_BSET_df = (0x4 << 23) | OPC_MSA_3R_0D, - OPC_MIN_S_df = (0x4 << 23) | OPC_MSA_3R_0E, - OPC_CLE_S_df = (0x4 << 23) | OPC_MSA_3R_0F, - OPC_AVE_S_df = (0x4 << 23) | OPC_MSA_3R_10, - OPC_ASUB_S_df = (0x4 << 23) | OPC_MSA_3R_11, - OPC_DIV_S_df = (0x4 << 23) | OPC_MSA_3R_12, - OPC_DPSUB_S_df = (0x4 << 23) | OPC_MSA_3R_13, - OPC_ILVL_df = (0x4 << 23) | OPC_MSA_3R_14, - OPC_HADD_S_df = (0x4 << 23) | OPC_MSA_3R_15, - OPC_BNEG_df = (0x5 << 23) | OPC_MSA_3R_0D, - OPC_MIN_U_df = (0x5 << 23) | OPC_MSA_3R_0E, - OPC_CLE_U_df = (0x5 << 23) | OPC_MSA_3R_0F, - OPC_AVE_U_df = (0x5 << 23) | OPC_MSA_3R_10, - OPC_ASUB_U_df = (0x5 << 23) | OPC_MSA_3R_11, - OPC_DIV_U_df = (0x5 << 23) | OPC_MSA_3R_12, - OPC_DPSUB_U_df = (0x5 << 23) | OPC_MSA_3R_13, - OPC_ILVR_df = (0x5 << 23) | OPC_MSA_3R_14, - OPC_HADD_U_df = (0x5 << 23) | OPC_MSA_3R_15, - OPC_BINSL_df = (0x6 << 23) | OPC_MSA_3R_0D, - OPC_MAX_A_df = (0x6 << 23) | OPC_MSA_3R_0E, - OPC_AVER_S_df = (0x6 << 23) | OPC_MSA_3R_10, - OPC_MOD_S_df = (0x6 << 23) | OPC_MSA_3R_12, - OPC_ILVEV_df = (0x6 << 23) | OPC_MSA_3R_14, - OPC_HSUB_S_df = (0x6 << 23) | OPC_MSA_3R_15, - OPC_BINSR_df = (0x7 << 23) | OPC_MSA_3R_0D, - OPC_MIN_A_df = (0x7 << 23) | OPC_MSA_3R_0E, - OPC_AVER_U_df = (0x7 << 23) | OPC_MSA_3R_10, - OPC_MOD_U_df = (0x7 << 23) | OPC_MSA_3R_12, - OPC_ILVOD_df = (0x7 << 23) | OPC_MSA_3R_14, - OPC_HSUB_U_df = (0x7 << 23) | OPC_MSA_3R_15, - - /* ELM instructions df(bits 21..16) = _b, _h, _w, _d */ - OPC_SLDI_df = (0x0 << 22) | (0x00 << 16) | OPC_MSA_ELM, - OPC_CTCMSA = (0x0 << 22) | (0x3E << 16) | OPC_MSA_ELM, - OPC_SPLATI_df = (0x1 << 22) | (0x00 << 16) | OPC_MSA_ELM, - OPC_CFCMSA = (0x1 << 22) | (0x3E << 16) | OPC_MSA_ELM, - OPC_COPY_S_df = (0x2 << 22) | (0x00 << 16) | OPC_MSA_ELM, - OPC_MOVE_V = (0x2 << 22) | (0x3E << 16) | OPC_MSA_ELM, - OPC_COPY_U_df = (0x3 << 22) | (0x00 << 16) | OPC_MSA_ELM, - OPC_INSERT_df = (0x4 << 22) 
| (0x00 << 16) | OPC_MSA_ELM, - OPC_INSVE_df = (0x5 << 22) | (0x00 << 16) | OPC_MSA_ELM, - - /* 3RF instruction _df(bit 21) = _w, _d */ - OPC_FCAF_df = (0x0 << 22) | OPC_MSA_3RF_1A, - OPC_FADD_df = (0x0 << 22) | OPC_MSA_3RF_1B, - OPC_FCUN_df = (0x1 << 22) | OPC_MSA_3RF_1A, - OPC_FSUB_df = (0x1 << 22) | OPC_MSA_3RF_1B, - OPC_FCOR_df = (0x1 << 22) | OPC_MSA_3RF_1C, - OPC_FCEQ_df = (0x2 << 22) | OPC_MSA_3RF_1A, - OPC_FMUL_df = (0x2 << 22) | OPC_MSA_3RF_1B, - OPC_FCUNE_df = (0x2 << 22) | OPC_MSA_3RF_1C, - OPC_FCUEQ_df = (0x3 << 22) | OPC_MSA_3RF_1A, - OPC_FDIV_df = (0x3 << 22) | OPC_MSA_3RF_1B, - OPC_FCNE_df = (0x3 << 22) | OPC_MSA_3RF_1C, - OPC_FCLT_df = (0x4 << 22) | OPC_MSA_3RF_1A, - OPC_FMADD_df = (0x4 << 22) | OPC_MSA_3RF_1B, - OPC_MUL_Q_df = (0x4 << 22) | OPC_MSA_3RF_1C, - OPC_FCULT_df = (0x5 << 22) | OPC_MSA_3RF_1A, - OPC_FMSUB_df = (0x5 << 22) | OPC_MSA_3RF_1B, - OPC_MADD_Q_df = (0x5 << 22) | OPC_MSA_3RF_1C, - OPC_FCLE_df = (0x6 << 22) | OPC_MSA_3RF_1A, - OPC_MSUB_Q_df = (0x6 << 22) | OPC_MSA_3RF_1C, - OPC_FCULE_df = (0x7 << 22) | OPC_MSA_3RF_1A, - OPC_FEXP2_df = (0x7 << 22) | OPC_MSA_3RF_1B, - OPC_FSAF_df = (0x8 << 22) | OPC_MSA_3RF_1A, - OPC_FEXDO_df = (0x8 << 22) | OPC_MSA_3RF_1B, - OPC_FSUN_df = (0x9 << 22) | OPC_MSA_3RF_1A, - OPC_FSOR_df = (0x9 << 22) | OPC_MSA_3RF_1C, - OPC_FSEQ_df = (0xA << 22) | OPC_MSA_3RF_1A, - OPC_FTQ_df = (0xA << 22) | OPC_MSA_3RF_1B, - OPC_FSUNE_df = (0xA << 22) | OPC_MSA_3RF_1C, - OPC_FSUEQ_df = (0xB << 22) | OPC_MSA_3RF_1A, - OPC_FSNE_df = (0xB << 22) | OPC_MSA_3RF_1C, - OPC_FSLT_df = (0xC << 22) | OPC_MSA_3RF_1A, - OPC_FMIN_df = (0xC << 22) | OPC_MSA_3RF_1B, - OPC_MULR_Q_df = (0xC << 22) | OPC_MSA_3RF_1C, - OPC_FSULT_df = (0xD << 22) | OPC_MSA_3RF_1A, - OPC_FMIN_A_df = (0xD << 22) | OPC_MSA_3RF_1B, - OPC_MADDR_Q_df = (0xD << 22) | OPC_MSA_3RF_1C, - OPC_FSLE_df = (0xE << 22) | OPC_MSA_3RF_1A, - OPC_FMAX_df = (0xE << 22) | OPC_MSA_3RF_1B, - OPC_MSUBR_Q_df = (0xE << 22) | OPC_MSA_3RF_1C, - OPC_FSULE_df = (0xF << 22) | OPC_MSA_3RF_1A, - OPC_FMAX_A_df = (0xF << 22) | OPC_MSA_3RF_1B, - - /* BIT instruction df(bits 22..16) = _B _H _W _D */ - OPC_SLLI_df = (0x0 << 23) | OPC_MSA_BIT_09, - OPC_SAT_S_df = (0x0 << 23) | OPC_MSA_BIT_0A, - OPC_SRAI_df = (0x1 << 23) | OPC_MSA_BIT_09, - OPC_SAT_U_df = (0x1 << 23) | OPC_MSA_BIT_0A, - OPC_SRLI_df = (0x2 << 23) | OPC_MSA_BIT_09, - OPC_SRARI_df = (0x2 << 23) | OPC_MSA_BIT_0A, - OPC_BCLRI_df = (0x3 << 23) | OPC_MSA_BIT_09, - OPC_SRLRI_df = (0x3 << 23) | OPC_MSA_BIT_0A, - OPC_BSETI_df = (0x4 << 23) | OPC_MSA_BIT_09, - OPC_BNEGI_df = (0x5 << 23) | OPC_MSA_BIT_09, - OPC_BINSLI_df = (0x6 << 23) | OPC_MSA_BIT_09, - OPC_BINSRI_df = (0x7 << 23) | OPC_MSA_BIT_09, -}; - static const char msaregnames[][6] = { "w0.d0", "w0.d1", "w1.d0", "w1.d1", "w2.d0", "w2.d1", "w3.d0", "w3.d1", @@ -273,6 +54,77 @@ static const char msaregnames[][6] = { "w30.d0", "w30.d1", "w31.d0", "w31.d1", }; +/* Encoding of Operation Field (must be indexed by CPUMIPSMSADataFormat) */ +struct dfe { + int start; + int length; + uint32_t mask; +}; + +/* + * Extract immediate from df/{m,n} format (used by ELM & BIT instructions). + * Returns the immediate value, or -1 if the format does not match. + */ +static int df_extract_val(DisasContext *ctx, int x, const struct dfe *s) +{ + for (unsigned i = 0; i < 4; i++) { + if (extract32(x, s->start, s->length) == s->mask) { + return extract32(x, 0, s->start); + } + } + return -1; +} + +/* + * Extract DataField from df/{m,n} format (used by ELM & BIT instructions). 
+ * Returns the DataField, or -1 if the format does not match. + */ +static int df_extract_df(DisasContext *ctx, int x, const struct dfe *s) +{ + for (unsigned i = 0; i < 4; i++) { + if (extract32(x, s->start, s->length) == s->mask) { + return i; + } + } + return -1; +} + +static const struct dfe df_elm[] = { + /* Table 3.26 ELM Instruction Format */ + [DF_BYTE] = {4, 2, 0b00}, + [DF_HALF] = {3, 3, 0b100}, + [DF_WORD] = {2, 4, 0b1100}, + [DF_DOUBLE] = {1, 5, 0b11100} +}; + +static int elm_n(DisasContext *ctx, int x) +{ + return df_extract_val(ctx, x, df_elm); +} + +static int elm_df(DisasContext *ctx, int x) +{ + return df_extract_df(ctx, x, df_elm); +} + +static const struct dfe df_bit[] = { + /* Table 3.28 BIT Instruction Format */ + [DF_BYTE] = {3, 4, 0b1110}, + [DF_HALF] = {4, 3, 0b110}, + [DF_WORD] = {5, 2, 0b10}, + [DF_DOUBLE] = {6, 1, 0b0} +}; + +static int bit_m(DisasContext *ctx, int x) +{ + return df_extract_val(ctx, x, df_bit); +} + +static int bit_df(DisasContext *ctx, int x) +{ + return df_extract_df(ctx, x, df_bit); +} + static TCGv_i64 msa_wr_d[64]; void msa_translate_init(void) @@ -280,61 +132,80 @@ void msa_translate_init(void) int i; for (i = 0; i < 32; i++) { - int off = offsetof(CPUMIPSState, active_fpu.fpr[i].wr.d[0]); + int off; /* * The MSA vector registers are mapped on the * scalar floating-point unit (FPU) registers. */ + off = offsetof(CPUMIPSState, active_fpu.fpr[i].wr.d[0]); msa_wr_d[i * 2] = fpu_f64[i]; + off = offsetof(CPUMIPSState, active_fpu.fpr[i].wr.d[1]); msa_wr_d[i * 2 + 1] = tcg_global_mem_new_i64(cpu_env, off, msaregnames[i * 2 + 1]); } } -static inline int check_msa_access(DisasContext *ctx) +/* + * Check if MSA is enabled. + * This function is always called with MSA available. + * If MSA is disabled, raise an exception. 
+ */ +static inline bool check_msa_enabled(DisasContext *ctx) { if (unlikely((ctx->hflags & MIPS_HFLAG_FPU) && !(ctx->hflags & MIPS_HFLAG_F64))) { gen_reserved_instruction(ctx); - return 0; + return false; } if (unlikely(!(ctx->hflags & MIPS_HFLAG_MSA))) { generate_exception_end(ctx, EXCP_MSADIS); - return 0; + return false; } - return 1; + return true; } +typedef void gen_helper_piv(TCGv_ptr, TCGv_i32, TCGv); +typedef void gen_helper_pii(TCGv_ptr, TCGv_i32, TCGv_i32); +typedef void gen_helper_piii(TCGv_ptr, TCGv_i32, TCGv_i32, TCGv_i32); +typedef void gen_helper_piiii(TCGv_ptr, TCGv_i32, TCGv_i32, TCGv_i32, TCGv_i32); + +#define TRANS_DF_x(TYPE, NAME, trans_func, gen_func) \ + static gen_helper_p##TYPE * const NAME##_tab[4] = { \ + gen_func##_b, gen_func##_h, gen_func##_w, gen_func##_d \ + }; \ + TRANS(NAME, trans_func, NAME##_tab[a->df]) + +#define TRANS_DF_iv(NAME, trans_func, gen_func) \ + TRANS_DF_x(iv, NAME, trans_func, gen_func) + +#define TRANS_DF_ii(NAME, trans_func, gen_func) \ + TRANS_DF_x(ii, NAME, trans_func, gen_func) + +#define TRANS_DF_iii(NAME, trans_func, gen_func) \ + TRANS_DF_x(iii, NAME, trans_func, gen_func) + +#define TRANS_DF_iii_b(NAME, trans_func, gen_func) \ + static gen_helper_piii * const NAME##_tab[4] = { \ + NULL, gen_func##_h, gen_func##_w, gen_func##_d \ + }; \ + static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \ + { \ + return trans_func(ctx, a, NAME##_tab[a->df]); \ + } + static void gen_check_zero_element(TCGv tresult, uint8_t df, uint8_t wt, TCGCond cond) { /* generates tcg ops to check if any element is 0 */ /* Note this function only works with MSA_WRLEN = 128 */ - uint64_t eval_zero_or_big = 0; - uint64_t eval_big = 0; + uint64_t eval_zero_or_big = dup_const(df, 1); + uint64_t eval_big = eval_zero_or_big << ((8 << df) - 1); TCGv_i64 t0 = tcg_temp_new_i64(); TCGv_i64 t1 = tcg_temp_new_i64(); - switch (df) { - case DF_BYTE: - eval_zero_or_big = 0x0101010101010101ULL; - eval_big = 0x8080808080808080ULL; - break; - case DF_HALF: - eval_zero_or_big = 0x0001000100010001ULL; - eval_big = 0x8000800080008000ULL; - break; - case DF_WORD: - eval_zero_or_big = 0x0000000100000001ULL; - eval_big = 0x8000000080000000ULL; - break; - case DF_DOUBLE: - eval_zero_or_big = 0x0000000000000001ULL; - eval_big = 0x8000000000000000ULL; - break; - } + tcg_gen_subi_i64(t0, msa_wr_d[wt << 1], eval_zero_or_big); tcg_gen_andc_i64(t0, t0, msa_wr_d[wt << 1]); tcg_gen_andi_i64(t0, t0, eval_big); @@ -350,11 +221,13 @@ static void gen_check_zero_element(TCGv tresult, uint8_t df, uint8_t wt, tcg_temp_free_i64(t1); } -static bool gen_msa_BxZ_V(DisasContext *ctx, int wt, int s16, TCGCond cond) +static bool gen_msa_BxZ_V(DisasContext *ctx, int wt, int sa, TCGCond cond) { TCGv_i64 t0; - check_msa_access(ctx); + if (!check_msa_enabled(ctx)) { + return true; + } if (ctx->hflags & MIPS_HFLAG_BMASK) { gen_reserved_instruction(ctx); @@ -366,7 +239,7 @@ static bool gen_msa_BxZ_V(DisasContext *ctx, int wt, int s16, TCGCond cond) tcg_gen_trunc_i64_tl(bcond, t0); tcg_temp_free_i64(t0); - ctx->btarget = ctx->base.pc_next + (s16 << 2) + 4; + ctx->btarget = ctx->base.pc_next + (sa << 2) + 4; ctx->hflags |= MIPS_HFLAG_BC; ctx->hflags |= MIPS_HFLAG_BDS32; @@ -376,17 +249,19 @@ static bool gen_msa_BxZ_V(DisasContext *ctx, int wt, int s16, TCGCond cond) static bool trans_BZ_V(DisasContext *ctx, arg_msa_bz *a) { - return gen_msa_BxZ_V(ctx, a->wt, a->s16, TCG_COND_EQ); + return gen_msa_BxZ_V(ctx, a->wt, a->sa, TCG_COND_EQ); } static bool trans_BNZ_V(DisasContext *ctx, arg_msa_bz *a) { - return 
gen_msa_BxZ_V(ctx, a->wt, a->s16, TCG_COND_NE); + return gen_msa_BxZ_V(ctx, a->wt, a->sa, TCG_COND_NE); } -static bool gen_msa_BxZ(DisasContext *ctx, int df, int wt, int s16, bool if_not) +static bool gen_msa_BxZ(DisasContext *ctx, int df, int wt, int sa, bool if_not) { - check_msa_access(ctx); + if (!check_msa_enabled(ctx)) { + return true; + } if (ctx->hflags & MIPS_HFLAG_BMASK) { gen_reserved_instruction(ctx); @@ -395,1872 +270,521 @@ static bool gen_msa_BxZ(DisasContext *ctx, int df, int wt, int s16, bool if_not) gen_check_zero_element(bcond, df, wt, if_not ? TCG_COND_EQ : TCG_COND_NE); - ctx->btarget = ctx->base.pc_next + (s16 << 2) + 4; + ctx->btarget = ctx->base.pc_next + (sa << 2) + 4; ctx->hflags |= MIPS_HFLAG_BC; ctx->hflags |= MIPS_HFLAG_BDS32; return true; } -static bool trans_BZ_x(DisasContext *ctx, arg_msa_bz *a) +static bool trans_BZ(DisasContext *ctx, arg_msa_bz *a) { - return gen_msa_BxZ(ctx, a->df, a->wt, a->s16, false); + return gen_msa_BxZ(ctx, a->df, a->wt, a->sa, false); } -static bool trans_BNZ_x(DisasContext *ctx, arg_msa_bz *a) +static bool trans_BNZ(DisasContext *ctx, arg_msa_bz *a) { - return gen_msa_BxZ(ctx, a->df, a->wt, a->s16, true); + return gen_msa_BxZ(ctx, a->df, a->wt, a->sa, true); } -static void gen_msa_i8(DisasContext *ctx) +static bool trans_msa_i8(DisasContext *ctx, arg_msa_i *a, + gen_helper_piii *gen_msa_i8) { -#define MASK_MSA_I8(op) (MASK_MSA_MINOR(op) | (op & (0x03 << 24))) - uint8_t i8 = (ctx->opcode >> 16) & 0xff; - uint8_t ws = (ctx->opcode >> 11) & 0x1f; - uint8_t wd = (ctx->opcode >> 6) & 0x1f; - - TCGv_i32 twd = tcg_const_i32(wd); - TCGv_i32 tws = tcg_const_i32(ws); - TCGv_i32 ti8 = tcg_const_i32(i8); - - switch (MASK_MSA_I8(ctx->opcode)) { - case OPC_ANDI_B: - gen_helper_msa_andi_b(cpu_env, twd, tws, ti8); - break; - case OPC_ORI_B: - gen_helper_msa_ori_b(cpu_env, twd, tws, ti8); - break; - case OPC_NORI_B: - gen_helper_msa_nori_b(cpu_env, twd, tws, ti8); - break; - case OPC_XORI_B: - gen_helper_msa_xori_b(cpu_env, twd, tws, ti8); - break; - case OPC_BMNZI_B: - gen_helper_msa_bmnzi_b(cpu_env, twd, tws, ti8); - break; - case OPC_BMZI_B: - gen_helper_msa_bmzi_b(cpu_env, twd, tws, ti8); - break; - case OPC_BSELI_B: - gen_helper_msa_bseli_b(cpu_env, twd, tws, ti8); - break; - case OPC_SHF_B: - case OPC_SHF_H: - case OPC_SHF_W: - { - uint8_t df = (ctx->opcode >> 24) & 0x3; - if (df == DF_DOUBLE) { - gen_reserved_instruction(ctx); - } else { - TCGv_i32 tdf = tcg_const_i32(df); - gen_helper_msa_shf_df(cpu_env, tdf, twd, tws, ti8); - tcg_temp_free_i32(tdf); - } - } - break; - default: - MIPS_INVAL("MSA instruction"); - gen_reserved_instruction(ctx); - break; + if (!check_msa_enabled(ctx)) { + return true; } - tcg_temp_free_i32(twd); - tcg_temp_free_i32(tws); - tcg_temp_free_i32(ti8); -} - -static void gen_msa_i5(DisasContext *ctx) -{ -#define MASK_MSA_I5(op) (MASK_MSA_MINOR(op) | (op & (0x7 << 23))) - uint8_t df = (ctx->opcode >> 21) & 0x3; - int8_t s5 = (int8_t) sextract32(ctx->opcode, 16, 5); - uint8_t u5 = (ctx->opcode >> 16) & 0x1f; - uint8_t ws = (ctx->opcode >> 11) & 0x1f; - uint8_t wd = (ctx->opcode >> 6) & 0x1f; - - TCGv_i32 tdf = tcg_const_i32(df); - TCGv_i32 twd = tcg_const_i32(wd); - TCGv_i32 tws = tcg_const_i32(ws); - TCGv_i32 timm = tcg_temp_new_i32(); - tcg_gen_movi_i32(timm, u5); - - switch (MASK_MSA_I5(ctx->opcode)) { - case OPC_ADDVI_df: - gen_helper_msa_addvi_df(cpu_env, tdf, twd, tws, timm); - break; - case OPC_SUBVI_df: - gen_helper_msa_subvi_df(cpu_env, tdf, twd, tws, timm); - break; - case OPC_MAXI_S_df: - 
tcg_gen_movi_i32(timm, s5); - gen_helper_msa_maxi_s_df(cpu_env, tdf, twd, tws, timm); - break; - case OPC_MAXI_U_df: - gen_helper_msa_maxi_u_df(cpu_env, tdf, twd, tws, timm); - break; - case OPC_MINI_S_df: - tcg_gen_movi_i32(timm, s5); - gen_helper_msa_mini_s_df(cpu_env, tdf, twd, tws, timm); - break; - case OPC_MINI_U_df: - gen_helper_msa_mini_u_df(cpu_env, tdf, twd, tws, timm); - break; - case OPC_CEQI_df: - tcg_gen_movi_i32(timm, s5); - gen_helper_msa_ceqi_df(cpu_env, tdf, twd, tws, timm); - break; - case OPC_CLTI_S_df: - tcg_gen_movi_i32(timm, s5); - gen_helper_msa_clti_s_df(cpu_env, tdf, twd, tws, timm); - break; - case OPC_CLTI_U_df: - gen_helper_msa_clti_u_df(cpu_env, tdf, twd, tws, timm); - break; - case OPC_CLEI_S_df: - tcg_gen_movi_i32(timm, s5); - gen_helper_msa_clei_s_df(cpu_env, tdf, twd, tws, timm); - break; - case OPC_CLEI_U_df: - gen_helper_msa_clei_u_df(cpu_env, tdf, twd, tws, timm); - break; - case OPC_LDI_df: - { - int32_t s10 = sextract32(ctx->opcode, 11, 10); - tcg_gen_movi_i32(timm, s10); - gen_helper_msa_ldi_df(cpu_env, tdf, twd, timm); - } - break; - default: - MIPS_INVAL("MSA instruction"); - gen_reserved_instruction(ctx); - break; - } - - tcg_temp_free_i32(tdf); - tcg_temp_free_i32(twd); - tcg_temp_free_i32(tws); - tcg_temp_free_i32(timm); -} - -static void gen_msa_bit(DisasContext *ctx) -{ -#define MASK_MSA_BIT(op) (MASK_MSA_MINOR(op) | (op & (0x7 << 23))) - uint8_t dfm = (ctx->opcode >> 16) & 0x7f; - uint32_t df = 0, m = 0; - uint8_t ws = (ctx->opcode >> 11) & 0x1f; - uint8_t wd = (ctx->opcode >> 6) & 0x1f; - - TCGv_i32 tdf; - TCGv_i32 tm; - TCGv_i32 twd; - TCGv_i32 tws; - - if ((dfm & 0x40) == 0x00) { - m = dfm & 0x3f; - df = DF_DOUBLE; - } else if ((dfm & 0x60) == 0x40) { - m = dfm & 0x1f; - df = DF_WORD; - } else if ((dfm & 0x70) == 0x60) { - m = dfm & 0x0f; - df = DF_HALF; - } else if ((dfm & 0x78) == 0x70) { - m = dfm & 0x7; - df = DF_BYTE; - } else { - gen_reserved_instruction(ctx); - return; - } - - tdf = tcg_const_i32(df); - tm = tcg_const_i32(m); - twd = tcg_const_i32(wd); - tws = tcg_const_i32(ws); - - switch (MASK_MSA_BIT(ctx->opcode)) { - case OPC_SLLI_df: - gen_helper_msa_slli_df(cpu_env, tdf, twd, tws, tm); - break; - case OPC_SRAI_df: - gen_helper_msa_srai_df(cpu_env, tdf, twd, tws, tm); - break; - case OPC_SRLI_df: - gen_helper_msa_srli_df(cpu_env, tdf, twd, tws, tm); - break; - case OPC_BCLRI_df: - gen_helper_msa_bclri_df(cpu_env, tdf, twd, tws, tm); - break; - case OPC_BSETI_df: - gen_helper_msa_bseti_df(cpu_env, tdf, twd, tws, tm); - break; - case OPC_BNEGI_df: - gen_helper_msa_bnegi_df(cpu_env, tdf, twd, tws, tm); - break; - case OPC_BINSLI_df: - gen_helper_msa_binsli_df(cpu_env, tdf, twd, tws, tm); - break; - case OPC_BINSRI_df: - gen_helper_msa_binsri_df(cpu_env, tdf, twd, tws, tm); - break; - case OPC_SAT_S_df: - gen_helper_msa_sat_s_df(cpu_env, tdf, twd, tws, tm); - break; - case OPC_SAT_U_df: - gen_helper_msa_sat_u_df(cpu_env, tdf, twd, tws, tm); - break; - case OPC_SRARI_df: - gen_helper_msa_srari_df(cpu_env, tdf, twd, tws, tm); - break; - case OPC_SRLRI_df: - gen_helper_msa_srlri_df(cpu_env, tdf, twd, tws, tm); - break; - default: - MIPS_INVAL("MSA instruction"); - gen_reserved_instruction(ctx); - break; - } - - tcg_temp_free_i32(tdf); - tcg_temp_free_i32(tm); - tcg_temp_free_i32(twd); - tcg_temp_free_i32(tws); -} - -static void gen_msa_3r(DisasContext *ctx) -{ -#define MASK_MSA_3R(op) (MASK_MSA_MINOR(op) | (op & (0x7 << 23))) - uint8_t df = (ctx->opcode >> 21) & 0x3; - uint8_t wt = (ctx->opcode >> 16) & 0x1f; - uint8_t ws = 
(ctx->opcode >> 11) & 0x1f; - uint8_t wd = (ctx->opcode >> 6) & 0x1f; - - TCGv_i32 tdf = tcg_const_i32(df); - TCGv_i32 twd = tcg_const_i32(wd); - TCGv_i32 tws = tcg_const_i32(ws); - TCGv_i32 twt = tcg_const_i32(wt); - - switch (MASK_MSA_3R(ctx->opcode)) { - case OPC_BINSL_df: - switch (df) { - case DF_BYTE: - gen_helper_msa_binsl_b(cpu_env, twd, tws, twt); - break; - case DF_HALF: - gen_helper_msa_binsl_h(cpu_env, twd, tws, twt); - break; - case DF_WORD: - gen_helper_msa_binsl_w(cpu_env, twd, tws, twt); - break; - case DF_DOUBLE: - gen_helper_msa_binsl_d(cpu_env, twd, tws, twt); - break; - } - break; - case OPC_BINSR_df: - switch (df) { - case DF_BYTE: - gen_helper_msa_binsr_b(cpu_env, twd, tws, twt); - break; - case DF_HALF: - gen_helper_msa_binsr_h(cpu_env, twd, tws, twt); - break; - case DF_WORD: - gen_helper_msa_binsr_w(cpu_env, twd, tws, twt); - break; - case DF_DOUBLE: - gen_helper_msa_binsr_d(cpu_env, twd, tws, twt); - break; - } - break; - case OPC_BCLR_df: - switch (df) { - case DF_BYTE: - gen_helper_msa_bclr_b(cpu_env, twd, tws, twt); - break; - case DF_HALF: - gen_helper_msa_bclr_h(cpu_env, twd, tws, twt); - break; - case DF_WORD: - gen_helper_msa_bclr_w(cpu_env, twd, tws, twt); - break; - case DF_DOUBLE: - gen_helper_msa_bclr_d(cpu_env, twd, tws, twt); - break; - } - break; - case OPC_BNEG_df: - switch (df) { - case DF_BYTE: - gen_helper_msa_bneg_b(cpu_env, twd, tws, twt); - break; - case DF_HALF: - gen_helper_msa_bneg_h(cpu_env, twd, tws, twt); - break; - case DF_WORD: - gen_helper_msa_bneg_w(cpu_env, twd, tws, twt); - break; - case DF_DOUBLE: - gen_helper_msa_bneg_d(cpu_env, twd, tws, twt); - break; - } - break; - case OPC_BSET_df: - switch (df) { - case DF_BYTE: - gen_helper_msa_bset_b(cpu_env, twd, tws, twt); - break; - case DF_HALF: - gen_helper_msa_bset_h(cpu_env, twd, tws, twt); - break; - case DF_WORD: - gen_helper_msa_bset_w(cpu_env, twd, tws, twt); - break; - case DF_DOUBLE: - gen_helper_msa_bset_d(cpu_env, twd, tws, twt); - break; - } - break; - case OPC_ADD_A_df: - switch (df) { - case DF_BYTE: - gen_helper_msa_add_a_b(cpu_env, twd, tws, twt); - break; - case DF_HALF: - gen_helper_msa_add_a_h(cpu_env, twd, tws, twt); - break; - case DF_WORD: - gen_helper_msa_add_a_w(cpu_env, twd, tws, twt); - break; - case DF_DOUBLE: - gen_helper_msa_add_a_d(cpu_env, twd, tws, twt); - break; - } - break; - case OPC_ADDS_A_df: - switch (df) { - case DF_BYTE: - gen_helper_msa_adds_a_b(cpu_env, twd, tws, twt); - break; - case DF_HALF: - gen_helper_msa_adds_a_h(cpu_env, twd, tws, twt); - break; - case DF_WORD: - gen_helper_msa_adds_a_w(cpu_env, twd, tws, twt); - break; - case DF_DOUBLE: - gen_helper_msa_adds_a_d(cpu_env, twd, tws, twt); - break; - } - break; - case OPC_ADDS_S_df: - switch (df) { - case DF_BYTE: - gen_helper_msa_adds_s_b(cpu_env, twd, tws, twt); - break; - case DF_HALF: - gen_helper_msa_adds_s_h(cpu_env, twd, tws, twt); - break; - case DF_WORD: - gen_helper_msa_adds_s_w(cpu_env, twd, tws, twt); - break; - case DF_DOUBLE: - gen_helper_msa_adds_s_d(cpu_env, twd, tws, twt); - break; - } - break; - case OPC_ADDS_U_df: - switch (df) { - case DF_BYTE: - gen_helper_msa_adds_u_b(cpu_env, twd, tws, twt); - break; - case DF_HALF: - gen_helper_msa_adds_u_h(cpu_env, twd, tws, twt); - break; - case DF_WORD: - gen_helper_msa_adds_u_w(cpu_env, twd, tws, twt); - break; - case DF_DOUBLE: - gen_helper_msa_adds_u_d(cpu_env, twd, tws, twt); - break; - } - break; - case OPC_ADDV_df: - switch (df) { - case DF_BYTE: - gen_helper_msa_addv_b(cpu_env, twd, tws, twt); - break; - case DF_HALF: - 
gen_helper_msa_addv_h(cpu_env, twd, tws, twt); - break; - case DF_WORD: - gen_helper_msa_addv_w(cpu_env, twd, tws, twt); - break; - case DF_DOUBLE: - gen_helper_msa_addv_d(cpu_env, twd, tws, twt); - break; - } - break; - case OPC_AVE_S_df: - switch (df) { - case DF_BYTE: - gen_helper_msa_ave_s_b(cpu_env, twd, tws, twt); - break; - case DF_HALF: - gen_helper_msa_ave_s_h(cpu_env, twd, tws, twt); - break; - case DF_WORD: - gen_helper_msa_ave_s_w(cpu_env, twd, tws, twt); - break; - case DF_DOUBLE: - gen_helper_msa_ave_s_d(cpu_env, twd, tws, twt); - break; - } - break; - case OPC_AVE_U_df: - switch (df) { - case DF_BYTE: - gen_helper_msa_ave_u_b(cpu_env, twd, tws, twt); - break; - case DF_HALF: - gen_helper_msa_ave_u_h(cpu_env, twd, tws, twt); - break; - case DF_WORD: - gen_helper_msa_ave_u_w(cpu_env, twd, tws, twt); - break; - case DF_DOUBLE: - gen_helper_msa_ave_u_d(cpu_env, twd, tws, twt); - break; - } - break; - case OPC_AVER_S_df: - switch (df) { - case DF_BYTE: - gen_helper_msa_aver_s_b(cpu_env, twd, tws, twt); - break; - case DF_HALF: - gen_helper_msa_aver_s_h(cpu_env, twd, tws, twt); - break; - case DF_WORD: - gen_helper_msa_aver_s_w(cpu_env, twd, tws, twt); - break; - case DF_DOUBLE: - gen_helper_msa_aver_s_d(cpu_env, twd, tws, twt); - break; - } - break; - case OPC_AVER_U_df: - switch (df) { - case DF_BYTE: - gen_helper_msa_aver_u_b(cpu_env, twd, tws, twt); - break; - case DF_HALF: - gen_helper_msa_aver_u_h(cpu_env, twd, tws, twt); - break; - case DF_WORD: - gen_helper_msa_aver_u_w(cpu_env, twd, tws, twt); - break; - case DF_DOUBLE: - gen_helper_msa_aver_u_d(cpu_env, twd, tws, twt); - break; - } - break; - case OPC_CEQ_df: - switch (df) { - case DF_BYTE: - gen_helper_msa_ceq_b(cpu_env, twd, tws, twt); - break; - case DF_HALF: - gen_helper_msa_ceq_h(cpu_env, twd, tws, twt); - break; - case DF_WORD: - gen_helper_msa_ceq_w(cpu_env, twd, tws, twt); - break; - case DF_DOUBLE: - gen_helper_msa_ceq_d(cpu_env, twd, tws, twt); - break; - } - break; - case OPC_CLE_S_df: - switch (df) { - case DF_BYTE: - gen_helper_msa_cle_s_b(cpu_env, twd, tws, twt); - break; - case DF_HALF: - gen_helper_msa_cle_s_h(cpu_env, twd, tws, twt); - break; - case DF_WORD: - gen_helper_msa_cle_s_w(cpu_env, twd, tws, twt); - break; - case DF_DOUBLE: - gen_helper_msa_cle_s_d(cpu_env, twd, tws, twt); - break; - } - break; - case OPC_CLE_U_df: - switch (df) { - case DF_BYTE: - gen_helper_msa_cle_u_b(cpu_env, twd, tws, twt); - break; - case DF_HALF: - gen_helper_msa_cle_u_h(cpu_env, twd, tws, twt); - break; - case DF_WORD: - gen_helper_msa_cle_u_w(cpu_env, twd, tws, twt); - break; - case DF_DOUBLE: - gen_helper_msa_cle_u_d(cpu_env, twd, tws, twt); - break; - } - break; - case OPC_CLT_S_df: - switch (df) { - case DF_BYTE: - gen_helper_msa_clt_s_b(cpu_env, twd, tws, twt); - break; - case DF_HALF: - gen_helper_msa_clt_s_h(cpu_env, twd, tws, twt); - break; - case DF_WORD: - gen_helper_msa_clt_s_w(cpu_env, twd, tws, twt); - break; - case DF_DOUBLE: - gen_helper_msa_clt_s_d(cpu_env, twd, tws, twt); - break; - } - break; - case OPC_CLT_U_df: - switch (df) { - case DF_BYTE: - gen_helper_msa_clt_u_b(cpu_env, twd, tws, twt); - break; - case DF_HALF: - gen_helper_msa_clt_u_h(cpu_env, twd, tws, twt); - break; - case DF_WORD: - gen_helper_msa_clt_u_w(cpu_env, twd, tws, twt); - break; - case DF_DOUBLE: - gen_helper_msa_clt_u_d(cpu_env, twd, tws, twt); - break; - } - break; - case OPC_DIV_S_df: - switch (df) { - case DF_BYTE: - gen_helper_msa_div_s_b(cpu_env, twd, tws, twt); - break; - case DF_HALF: - gen_helper_msa_div_s_h(cpu_env, twd, 
tws, twt); - break; - case DF_WORD: - gen_helper_msa_div_s_w(cpu_env, twd, tws, twt); - break; - case DF_DOUBLE: - gen_helper_msa_div_s_d(cpu_env, twd, tws, twt); - break; - } - break; - case OPC_DIV_U_df: - switch (df) { - case DF_BYTE: - gen_helper_msa_div_u_b(cpu_env, twd, tws, twt); - break; - case DF_HALF: - gen_helper_msa_div_u_h(cpu_env, twd, tws, twt); - break; - case DF_WORD: - gen_helper_msa_div_u_w(cpu_env, twd, tws, twt); - break; - case DF_DOUBLE: - gen_helper_msa_div_u_d(cpu_env, twd, tws, twt); - break; - } - break; - case OPC_MAX_A_df: - switch (df) { - case DF_BYTE: - gen_helper_msa_max_a_b(cpu_env, twd, tws, twt); - break; - case DF_HALF: - gen_helper_msa_max_a_h(cpu_env, twd, tws, twt); - break; - case DF_WORD: - gen_helper_msa_max_a_w(cpu_env, twd, tws, twt); - break; - case DF_DOUBLE: - gen_helper_msa_max_a_d(cpu_env, twd, tws, twt); - break; - } - break; - case OPC_MAX_S_df: - switch (df) { - case DF_BYTE: - gen_helper_msa_max_s_b(cpu_env, twd, tws, twt); - break; - case DF_HALF: - gen_helper_msa_max_s_h(cpu_env, twd, tws, twt); - break; - case DF_WORD: - gen_helper_msa_max_s_w(cpu_env, twd, tws, twt); - break; - case DF_DOUBLE: - gen_helper_msa_max_s_d(cpu_env, twd, tws, twt); - break; - } - break; - case OPC_MAX_U_df: - switch (df) { - case DF_BYTE: - gen_helper_msa_max_u_b(cpu_env, twd, tws, twt); - break; - case DF_HALF: - gen_helper_msa_max_u_h(cpu_env, twd, tws, twt); - break; - case DF_WORD: - gen_helper_msa_max_u_w(cpu_env, twd, tws, twt); - break; - case DF_DOUBLE: - gen_helper_msa_max_u_d(cpu_env, twd, tws, twt); - break; - } - break; - case OPC_MIN_A_df: - switch (df) { - case DF_BYTE: - gen_helper_msa_min_a_b(cpu_env, twd, tws, twt); - break; - case DF_HALF: - gen_helper_msa_min_a_h(cpu_env, twd, tws, twt); - break; - case DF_WORD: - gen_helper_msa_min_a_w(cpu_env, twd, tws, twt); - break; - case DF_DOUBLE: - gen_helper_msa_min_a_d(cpu_env, twd, tws, twt); - break; - } - break; - case OPC_MIN_S_df: - switch (df) { - case DF_BYTE: - gen_helper_msa_min_s_b(cpu_env, twd, tws, twt); - break; - case DF_HALF: - gen_helper_msa_min_s_h(cpu_env, twd, tws, twt); - break; - case DF_WORD: - gen_helper_msa_min_s_w(cpu_env, twd, tws, twt); - break; - case DF_DOUBLE: - gen_helper_msa_min_s_d(cpu_env, twd, tws, twt); - break; - } - break; - case OPC_MIN_U_df: - switch (df) { - case DF_BYTE: - gen_helper_msa_min_u_b(cpu_env, twd, tws, twt); - break; - case DF_HALF: - gen_helper_msa_min_u_h(cpu_env, twd, tws, twt); - break; - case DF_WORD: - gen_helper_msa_min_u_w(cpu_env, twd, tws, twt); - break; - case DF_DOUBLE: - gen_helper_msa_min_u_d(cpu_env, twd, tws, twt); - break; - } - break; - case OPC_MOD_S_df: - switch (df) { - case DF_BYTE: - gen_helper_msa_mod_s_b(cpu_env, twd, tws, twt); - break; - case DF_HALF: - gen_helper_msa_mod_s_h(cpu_env, twd, tws, twt); - break; - case DF_WORD: - gen_helper_msa_mod_s_w(cpu_env, twd, tws, twt); - break; - case DF_DOUBLE: - gen_helper_msa_mod_s_d(cpu_env, twd, tws, twt); - break; - } - break; - case OPC_MOD_U_df: - switch (df) { - case DF_BYTE: - gen_helper_msa_mod_u_b(cpu_env, twd, tws, twt); - break; - case DF_HALF: - gen_helper_msa_mod_u_h(cpu_env, twd, tws, twt); - break; - case DF_WORD: - gen_helper_msa_mod_u_w(cpu_env, twd, tws, twt); - break; - case DF_DOUBLE: - gen_helper_msa_mod_u_d(cpu_env, twd, tws, twt); - break; - } - break; - case OPC_MADDV_df: - switch (df) { - case DF_BYTE: - gen_helper_msa_maddv_b(cpu_env, twd, tws, twt); - break; - case DF_HALF: - gen_helper_msa_maddv_h(cpu_env, twd, tws, twt); - break; - case 
DF_WORD: - gen_helper_msa_maddv_w(cpu_env, twd, tws, twt); - break; - case DF_DOUBLE: - gen_helper_msa_maddv_d(cpu_env, twd, tws, twt); - break; - } - break; - case OPC_MSUBV_df: - switch (df) { - case DF_BYTE: - gen_helper_msa_msubv_b(cpu_env, twd, tws, twt); - break; - case DF_HALF: - gen_helper_msa_msubv_h(cpu_env, twd, tws, twt); - break; - case DF_WORD: - gen_helper_msa_msubv_w(cpu_env, twd, tws, twt); - break; - case DF_DOUBLE: - gen_helper_msa_msubv_d(cpu_env, twd, tws, twt); - break; - } - break; - case OPC_ASUB_S_df: - switch (df) { - case DF_BYTE: - gen_helper_msa_asub_s_b(cpu_env, twd, tws, twt); - break; - case DF_HALF: - gen_helper_msa_asub_s_h(cpu_env, twd, tws, twt); - break; - case DF_WORD: - gen_helper_msa_asub_s_w(cpu_env, twd, tws, twt); - break; - case DF_DOUBLE: - gen_helper_msa_asub_s_d(cpu_env, twd, tws, twt); - break; - } - break; - case OPC_ASUB_U_df: - switch (df) { - case DF_BYTE: - gen_helper_msa_asub_u_b(cpu_env, twd, tws, twt); - break; - case DF_HALF: - gen_helper_msa_asub_u_h(cpu_env, twd, tws, twt); - break; - case DF_WORD: - gen_helper_msa_asub_u_w(cpu_env, twd, tws, twt); - break; - case DF_DOUBLE: - gen_helper_msa_asub_u_d(cpu_env, twd, tws, twt); - break; - } - break; - case OPC_ILVEV_df: - switch (df) { - case DF_BYTE: - gen_helper_msa_ilvev_b(cpu_env, twd, tws, twt); - break; - case DF_HALF: - gen_helper_msa_ilvev_h(cpu_env, twd, tws, twt); - break; - case DF_WORD: - gen_helper_msa_ilvev_w(cpu_env, twd, tws, twt); - break; - case DF_DOUBLE: - gen_helper_msa_ilvev_d(cpu_env, twd, tws, twt); - break; - } - break; - case OPC_ILVOD_df: - switch (df) { - case DF_BYTE: - gen_helper_msa_ilvod_b(cpu_env, twd, tws, twt); - break; - case DF_HALF: - gen_helper_msa_ilvod_h(cpu_env, twd, tws, twt); - break; - case DF_WORD: - gen_helper_msa_ilvod_w(cpu_env, twd, tws, twt); - break; - case DF_DOUBLE: - gen_helper_msa_ilvod_d(cpu_env, twd, tws, twt); - break; - } - break; - case OPC_ILVL_df: - switch (df) { - case DF_BYTE: - gen_helper_msa_ilvl_b(cpu_env, twd, tws, twt); - break; - case DF_HALF: - gen_helper_msa_ilvl_h(cpu_env, twd, tws, twt); - break; - case DF_WORD: - gen_helper_msa_ilvl_w(cpu_env, twd, tws, twt); - break; - case DF_DOUBLE: - gen_helper_msa_ilvl_d(cpu_env, twd, tws, twt); - break; - } - break; - case OPC_ILVR_df: - switch (df) { - case DF_BYTE: - gen_helper_msa_ilvr_b(cpu_env, twd, tws, twt); - break; - case DF_HALF: - gen_helper_msa_ilvr_h(cpu_env, twd, tws, twt); - break; - case DF_WORD: - gen_helper_msa_ilvr_w(cpu_env, twd, tws, twt); - break; - case DF_DOUBLE: - gen_helper_msa_ilvr_d(cpu_env, twd, tws, twt); - break; - } - break; - case OPC_PCKEV_df: - switch (df) { - case DF_BYTE: - gen_helper_msa_pckev_b(cpu_env, twd, tws, twt); - break; - case DF_HALF: - gen_helper_msa_pckev_h(cpu_env, twd, tws, twt); - break; - case DF_WORD: - gen_helper_msa_pckev_w(cpu_env, twd, tws, twt); - break; - case DF_DOUBLE: - gen_helper_msa_pckev_d(cpu_env, twd, tws, twt); - break; - } - break; - case OPC_PCKOD_df: - switch (df) { - case DF_BYTE: - gen_helper_msa_pckod_b(cpu_env, twd, tws, twt); - break; - case DF_HALF: - gen_helper_msa_pckod_h(cpu_env, twd, tws, twt); - break; - case DF_WORD: - gen_helper_msa_pckod_w(cpu_env, twd, tws, twt); - break; - case DF_DOUBLE: - gen_helper_msa_pckod_d(cpu_env, twd, tws, twt); - break; - } - break; - case OPC_SLL_df: - switch (df) { - case DF_BYTE: - gen_helper_msa_sll_b(cpu_env, twd, tws, twt); - break; - case DF_HALF: - gen_helper_msa_sll_h(cpu_env, twd, tws, twt); - break; - case DF_WORD: - gen_helper_msa_sll_w(cpu_env, 
twd, tws, twt); - break; - case DF_DOUBLE: - gen_helper_msa_sll_d(cpu_env, twd, tws, twt); - break; - } - break; - case OPC_SRA_df: - switch (df) { - case DF_BYTE: - gen_helper_msa_sra_b(cpu_env, twd, tws, twt); - break; - case DF_HALF: - gen_helper_msa_sra_h(cpu_env, twd, tws, twt); - break; - case DF_WORD: - gen_helper_msa_sra_w(cpu_env, twd, tws, twt); - break; - case DF_DOUBLE: - gen_helper_msa_sra_d(cpu_env, twd, tws, twt); - break; - } - break; - case OPC_SRAR_df: - switch (df) { - case DF_BYTE: - gen_helper_msa_srar_b(cpu_env, twd, tws, twt); - break; - case DF_HALF: - gen_helper_msa_srar_h(cpu_env, twd, tws, twt); - break; - case DF_WORD: - gen_helper_msa_srar_w(cpu_env, twd, tws, twt); - break; - case DF_DOUBLE: - gen_helper_msa_srar_d(cpu_env, twd, tws, twt); - break; - } - break; - case OPC_SRL_df: - switch (df) { - case DF_BYTE: - gen_helper_msa_srl_b(cpu_env, twd, tws, twt); - break; - case DF_HALF: - gen_helper_msa_srl_h(cpu_env, twd, tws, twt); - break; - case DF_WORD: - gen_helper_msa_srl_w(cpu_env, twd, tws, twt); - break; - case DF_DOUBLE: - gen_helper_msa_srl_d(cpu_env, twd, tws, twt); - break; - } - break; - case OPC_SRLR_df: - switch (df) { - case DF_BYTE: - gen_helper_msa_srlr_b(cpu_env, twd, tws, twt); - break; - case DF_HALF: - gen_helper_msa_srlr_h(cpu_env, twd, tws, twt); - break; - case DF_WORD: - gen_helper_msa_srlr_w(cpu_env, twd, tws, twt); - break; - case DF_DOUBLE: - gen_helper_msa_srlr_d(cpu_env, twd, tws, twt); - break; - } - break; - case OPC_SUBS_S_df: - switch (df) { - case DF_BYTE: - gen_helper_msa_subs_s_b(cpu_env, twd, tws, twt); - break; - case DF_HALF: - gen_helper_msa_subs_s_h(cpu_env, twd, tws, twt); - break; - case DF_WORD: - gen_helper_msa_subs_s_w(cpu_env, twd, tws, twt); - break; - case DF_DOUBLE: - gen_helper_msa_subs_s_d(cpu_env, twd, tws, twt); - break; - } - break; - case OPC_MULV_df: - switch (df) { - case DF_BYTE: - gen_helper_msa_mulv_b(cpu_env, twd, tws, twt); - break; - case DF_HALF: - gen_helper_msa_mulv_h(cpu_env, twd, tws, twt); - break; - case DF_WORD: - gen_helper_msa_mulv_w(cpu_env, twd, tws, twt); - break; - case DF_DOUBLE: - gen_helper_msa_mulv_d(cpu_env, twd, tws, twt); - break; - } - break; - case OPC_SLD_df: - gen_helper_msa_sld_df(cpu_env, tdf, twd, tws, twt); - break; - case OPC_VSHF_df: - gen_helper_msa_vshf_df(cpu_env, tdf, twd, tws, twt); - break; - case OPC_SUBV_df: - switch (df) { - case DF_BYTE: - gen_helper_msa_subv_b(cpu_env, twd, tws, twt); - break; - case DF_HALF: - gen_helper_msa_subv_h(cpu_env, twd, tws, twt); - break; - case DF_WORD: - gen_helper_msa_subv_w(cpu_env, twd, tws, twt); - break; - case DF_DOUBLE: - gen_helper_msa_subv_d(cpu_env, twd, tws, twt); - break; - } - break; - case OPC_SUBS_U_df: - switch (df) { - case DF_BYTE: - gen_helper_msa_subs_u_b(cpu_env, twd, tws, twt); - break; - case DF_HALF: - gen_helper_msa_subs_u_h(cpu_env, twd, tws, twt); - break; - case DF_WORD: - gen_helper_msa_subs_u_w(cpu_env, twd, tws, twt); - break; - case DF_DOUBLE: - gen_helper_msa_subs_u_d(cpu_env, twd, tws, twt); - break; - } - break; - case OPC_SPLAT_df: - gen_helper_msa_splat_df(cpu_env, tdf, twd, tws, twt); - break; - case OPC_SUBSUS_U_df: - switch (df) { - case DF_BYTE: - gen_helper_msa_subsus_u_b(cpu_env, twd, tws, twt); - break; - case DF_HALF: - gen_helper_msa_subsus_u_h(cpu_env, twd, tws, twt); - break; - case DF_WORD: - gen_helper_msa_subsus_u_w(cpu_env, twd, tws, twt); - break; - case DF_DOUBLE: - gen_helper_msa_subsus_u_d(cpu_env, twd, tws, twt); - break; - } - break; - case OPC_SUBSUU_S_df: - switch 
(df) { - case DF_BYTE: - gen_helper_msa_subsuu_s_b(cpu_env, twd, tws, twt); - break; - case DF_HALF: - gen_helper_msa_subsuu_s_h(cpu_env, twd, tws, twt); - break; - case DF_WORD: - gen_helper_msa_subsuu_s_w(cpu_env, twd, tws, twt); - break; - case DF_DOUBLE: - gen_helper_msa_subsuu_s_d(cpu_env, twd, tws, twt); - break; - } - break; - - case OPC_DOTP_S_df: - case OPC_DOTP_U_df: - case OPC_DPADD_S_df: - case OPC_DPADD_U_df: - case OPC_DPSUB_S_df: - case OPC_HADD_S_df: - case OPC_DPSUB_U_df: - case OPC_HADD_U_df: - case OPC_HSUB_S_df: - case OPC_HSUB_U_df: - if (df == DF_BYTE) { - gen_reserved_instruction(ctx); - break; - } - switch (MASK_MSA_3R(ctx->opcode)) { - case OPC_HADD_S_df: - switch (df) { - case DF_HALF: - gen_helper_msa_hadd_s_h(cpu_env, twd, tws, twt); - break; - case DF_WORD: - gen_helper_msa_hadd_s_w(cpu_env, twd, tws, twt); - break; - case DF_DOUBLE: - gen_helper_msa_hadd_s_d(cpu_env, twd, tws, twt); - break; - } - break; - case OPC_HADD_U_df: - switch (df) { - case DF_HALF: - gen_helper_msa_hadd_u_h(cpu_env, twd, tws, twt); - break; - case DF_WORD: - gen_helper_msa_hadd_u_w(cpu_env, twd, tws, twt); - break; - case DF_DOUBLE: - gen_helper_msa_hadd_u_d(cpu_env, twd, tws, twt); - break; - } - break; - case OPC_HSUB_S_df: - switch (df) { - case DF_HALF: - gen_helper_msa_hsub_s_h(cpu_env, twd, tws, twt); - break; - case DF_WORD: - gen_helper_msa_hsub_s_w(cpu_env, twd, tws, twt); - break; - case DF_DOUBLE: - gen_helper_msa_hsub_s_d(cpu_env, twd, tws, twt); - break; - } - break; - case OPC_HSUB_U_df: - switch (df) { - case DF_HALF: - gen_helper_msa_hsub_u_h(cpu_env, twd, tws, twt); - break; - case DF_WORD: - gen_helper_msa_hsub_u_w(cpu_env, twd, tws, twt); - break; - case DF_DOUBLE: - gen_helper_msa_hsub_u_d(cpu_env, twd, tws, twt); - break; - } - break; - case OPC_DOTP_S_df: - switch (df) { - case DF_HALF: - gen_helper_msa_dotp_s_h(cpu_env, twd, tws, twt); - break; - case DF_WORD: - gen_helper_msa_dotp_s_w(cpu_env, twd, tws, twt); - break; - case DF_DOUBLE: - gen_helper_msa_dotp_s_d(cpu_env, twd, tws, twt); - break; - } - break; - case OPC_DOTP_U_df: - switch (df) { - case DF_HALF: - gen_helper_msa_dotp_u_h(cpu_env, twd, tws, twt); - break; - case DF_WORD: - gen_helper_msa_dotp_u_w(cpu_env, twd, tws, twt); - break; - case DF_DOUBLE: - gen_helper_msa_dotp_u_d(cpu_env, twd, tws, twt); - break; - } - break; - case OPC_DPADD_S_df: - switch (df) { - case DF_HALF: - gen_helper_msa_dpadd_s_h(cpu_env, twd, tws, twt); - break; - case DF_WORD: - gen_helper_msa_dpadd_s_w(cpu_env, twd, tws, twt); - break; - case DF_DOUBLE: - gen_helper_msa_dpadd_s_d(cpu_env, twd, tws, twt); - break; - } - break; - case OPC_DPADD_U_df: - switch (df) { - case DF_HALF: - gen_helper_msa_dpadd_u_h(cpu_env, twd, tws, twt); - break; - case DF_WORD: - gen_helper_msa_dpadd_u_w(cpu_env, twd, tws, twt); - break; - case DF_DOUBLE: - gen_helper_msa_dpadd_u_d(cpu_env, twd, tws, twt); - break; - } - break; - case OPC_DPSUB_S_df: - switch (df) { - case DF_HALF: - gen_helper_msa_dpsub_s_h(cpu_env, twd, tws, twt); - break; - case DF_WORD: - gen_helper_msa_dpsub_s_w(cpu_env, twd, tws, twt); - break; - case DF_DOUBLE: - gen_helper_msa_dpsub_s_d(cpu_env, twd, tws, twt); - break; - } - break; - case OPC_DPSUB_U_df: - switch (df) { - case DF_HALF: - gen_helper_msa_dpsub_u_h(cpu_env, twd, tws, twt); - break; - case DF_WORD: - gen_helper_msa_dpsub_u_w(cpu_env, twd, tws, twt); - break; - case DF_DOUBLE: - gen_helper_msa_dpsub_u_d(cpu_env, twd, tws, twt); - break; - } - break; - } - break; - default: - MIPS_INVAL("MSA instruction"); - 
gen_reserved_instruction(ctx); - break; - } - tcg_temp_free_i32(twd); - tcg_temp_free_i32(tws); - tcg_temp_free_i32(twt); - tcg_temp_free_i32(tdf); -} - -static void gen_msa_elm_3e(DisasContext *ctx) -{ -#define MASK_MSA_ELM_DF3E(op) (MASK_MSA_MINOR(op) | (op & (0x3FF << 16))) - uint8_t source = (ctx->opcode >> 11) & 0x1f; - uint8_t dest = (ctx->opcode >> 6) & 0x1f; - TCGv telm = tcg_temp_new(); - TCGv_i32 tsr = tcg_const_i32(source); - TCGv_i32 tdt = tcg_const_i32(dest); - - switch (MASK_MSA_ELM_DF3E(ctx->opcode)) { - case OPC_CTCMSA: - gen_load_gpr(telm, source); - gen_helper_msa_ctcmsa(cpu_env, telm, tdt); - break; - case OPC_CFCMSA: - gen_helper_msa_cfcmsa(telm, cpu_env, tsr); - gen_store_gpr(telm, dest); - break; - case OPC_MOVE_V: - gen_helper_msa_move_v(cpu_env, tdt, tsr); - break; - default: - MIPS_INVAL("MSA instruction"); - gen_reserved_instruction(ctx); - break; - } - - tcg_temp_free(telm); - tcg_temp_free_i32(tdt); - tcg_temp_free_i32(tsr); -} - -static void gen_msa_elm_df(DisasContext *ctx, uint32_t df, uint32_t n) -{ -#define MASK_MSA_ELM(op) (MASK_MSA_MINOR(op) | (op & (0xf << 22))) - uint8_t ws = (ctx->opcode >> 11) & 0x1f; - uint8_t wd = (ctx->opcode >> 6) & 0x1f; - - TCGv_i32 tws = tcg_const_i32(ws); - TCGv_i32 twd = tcg_const_i32(wd); - TCGv_i32 tn = tcg_const_i32(n); - TCGv_i32 tdf = tcg_const_i32(df); - - switch (MASK_MSA_ELM(ctx->opcode)) { - case OPC_SLDI_df: - gen_helper_msa_sldi_df(cpu_env, tdf, twd, tws, tn); - break; - case OPC_SPLATI_df: - gen_helper_msa_splati_df(cpu_env, tdf, twd, tws, tn); - break; - case OPC_INSVE_df: - gen_helper_msa_insve_df(cpu_env, tdf, twd, tws, tn); - break; - case OPC_COPY_S_df: - case OPC_COPY_U_df: - case OPC_INSERT_df: -#if !defined(TARGET_MIPS64) - /* Double format valid only for MIPS64 */ - if (df == DF_DOUBLE) { - gen_reserved_instruction(ctx); - break; - } - if ((MASK_MSA_ELM(ctx->opcode) == OPC_COPY_U_df) && - (df == DF_WORD)) { - gen_reserved_instruction(ctx); - break; - } -#endif - switch (MASK_MSA_ELM(ctx->opcode)) { - case OPC_COPY_S_df: - if (likely(wd != 0)) { - switch (df) { - case DF_BYTE: - gen_helper_msa_copy_s_b(cpu_env, twd, tws, tn); - break; - case DF_HALF: - gen_helper_msa_copy_s_h(cpu_env, twd, tws, tn); - break; - case DF_WORD: - gen_helper_msa_copy_s_w(cpu_env, twd, tws, tn); - break; -#if defined(TARGET_MIPS64) - case DF_DOUBLE: - gen_helper_msa_copy_s_d(cpu_env, twd, tws, tn); - break; -#endif - default: - assert(0); - } - } - break; - case OPC_COPY_U_df: - if (likely(wd != 0)) { - switch (df) { - case DF_BYTE: - gen_helper_msa_copy_u_b(cpu_env, twd, tws, tn); - break; - case DF_HALF: - gen_helper_msa_copy_u_h(cpu_env, twd, tws, tn); - break; -#if defined(TARGET_MIPS64) - case DF_WORD: - gen_helper_msa_copy_u_w(cpu_env, twd, tws, tn); - break; -#endif - default: - assert(0); - } - } - break; - case OPC_INSERT_df: - switch (df) { - case DF_BYTE: - gen_helper_msa_insert_b(cpu_env, twd, tws, tn); - break; - case DF_HALF: - gen_helper_msa_insert_h(cpu_env, twd, tws, tn); - break; - case DF_WORD: - gen_helper_msa_insert_w(cpu_env, twd, tws, tn); - break; -#if defined(TARGET_MIPS64) - case DF_DOUBLE: - gen_helper_msa_insert_d(cpu_env, twd, tws, tn); - break; -#endif - default: - assert(0); - } - break; - } - break; - default: - MIPS_INVAL("MSA instruction"); - gen_reserved_instruction(ctx); - } - tcg_temp_free_i32(twd); - tcg_temp_free_i32(tws); - tcg_temp_free_i32(tn); - tcg_temp_free_i32(tdf); -} - -static void gen_msa_elm(DisasContext *ctx) -{ - uint8_t dfn = (ctx->opcode >> 16) & 0x3f; - uint32_t df = 0, n = 
0; - - if ((dfn & 0x30) == 0x00) { - n = dfn & 0x0f; - df = DF_BYTE; - } else if ((dfn & 0x38) == 0x20) { - n = dfn & 0x07; - df = DF_HALF; - } else if ((dfn & 0x3c) == 0x30) { - n = dfn & 0x03; - df = DF_WORD; - } else if ((dfn & 0x3e) == 0x38) { - n = dfn & 0x01; - df = DF_DOUBLE; - } else if (dfn == 0x3E) { - /* CTCMSA, CFCMSA, MOVE.V */ - gen_msa_elm_3e(ctx); - return; - } else { - gen_reserved_instruction(ctx); - return; - } - - gen_msa_elm_df(ctx, df, n); -} - -static void gen_msa_3rf(DisasContext *ctx) -{ -#define MASK_MSA_3RF(op) (MASK_MSA_MINOR(op) | (op & (0xf << 22))) - uint8_t df = (ctx->opcode >> 21) & 0x1; - uint8_t wt = (ctx->opcode >> 16) & 0x1f; - uint8_t ws = (ctx->opcode >> 11) & 0x1f; - uint8_t wd = (ctx->opcode >> 6) & 0x1f; - - TCGv_i32 twd = tcg_const_i32(wd); - TCGv_i32 tws = tcg_const_i32(ws); - TCGv_i32 twt = tcg_const_i32(wt); - TCGv_i32 tdf = tcg_temp_new_i32(); - - /* adjust df value for floating-point instruction */ - tcg_gen_movi_i32(tdf, df + 2); - - switch (MASK_MSA_3RF(ctx->opcode)) { - case OPC_FCAF_df: - gen_helper_msa_fcaf_df(cpu_env, tdf, twd, tws, twt); - break; - case OPC_FADD_df: - gen_helper_msa_fadd_df(cpu_env, tdf, twd, tws, twt); - break; - case OPC_FCUN_df: - gen_helper_msa_fcun_df(cpu_env, tdf, twd, tws, twt); - break; - case OPC_FSUB_df: - gen_helper_msa_fsub_df(cpu_env, tdf, twd, tws, twt); - break; - case OPC_FCOR_df: - gen_helper_msa_fcor_df(cpu_env, tdf, twd, tws, twt); - break; - case OPC_FCEQ_df: - gen_helper_msa_fceq_df(cpu_env, tdf, twd, tws, twt); - break; - case OPC_FMUL_df: - gen_helper_msa_fmul_df(cpu_env, tdf, twd, tws, twt); - break; - case OPC_FCUNE_df: - gen_helper_msa_fcune_df(cpu_env, tdf, twd, tws, twt); - break; - case OPC_FCUEQ_df: - gen_helper_msa_fcueq_df(cpu_env, tdf, twd, tws, twt); - break; - case OPC_FDIV_df: - gen_helper_msa_fdiv_df(cpu_env, tdf, twd, tws, twt); - break; - case OPC_FCNE_df: - gen_helper_msa_fcne_df(cpu_env, tdf, twd, tws, twt); - break; - case OPC_FCLT_df: - gen_helper_msa_fclt_df(cpu_env, tdf, twd, tws, twt); - break; - case OPC_FMADD_df: - gen_helper_msa_fmadd_df(cpu_env, tdf, twd, tws, twt); - break; - case OPC_MUL_Q_df: - tcg_gen_movi_i32(tdf, df + 1); - gen_helper_msa_mul_q_df(cpu_env, tdf, twd, tws, twt); - break; - case OPC_FCULT_df: - gen_helper_msa_fcult_df(cpu_env, tdf, twd, tws, twt); - break; - case OPC_FMSUB_df: - gen_helper_msa_fmsub_df(cpu_env, tdf, twd, tws, twt); - break; - case OPC_MADD_Q_df: - tcg_gen_movi_i32(tdf, df + 1); - gen_helper_msa_madd_q_df(cpu_env, tdf, twd, tws, twt); - break; - case OPC_FCLE_df: - gen_helper_msa_fcle_df(cpu_env, tdf, twd, tws, twt); - break; - case OPC_MSUB_Q_df: - tcg_gen_movi_i32(tdf, df + 1); - gen_helper_msa_msub_q_df(cpu_env, tdf, twd, tws, twt); - break; - case OPC_FCULE_df: - gen_helper_msa_fcule_df(cpu_env, tdf, twd, tws, twt); - break; - case OPC_FEXP2_df: - gen_helper_msa_fexp2_df(cpu_env, tdf, twd, tws, twt); - break; - case OPC_FSAF_df: - gen_helper_msa_fsaf_df(cpu_env, tdf, twd, tws, twt); - break; - case OPC_FEXDO_df: - gen_helper_msa_fexdo_df(cpu_env, tdf, twd, tws, twt); - break; - case OPC_FSUN_df: - gen_helper_msa_fsun_df(cpu_env, tdf, twd, tws, twt); - break; - case OPC_FSOR_df: - gen_helper_msa_fsor_df(cpu_env, tdf, twd, tws, twt); - break; - case OPC_FSEQ_df: - gen_helper_msa_fseq_df(cpu_env, tdf, twd, tws, twt); - break; - case OPC_FTQ_df: - gen_helper_msa_ftq_df(cpu_env, tdf, twd, tws, twt); - break; - case OPC_FSUNE_df: - gen_helper_msa_fsune_df(cpu_env, tdf, twd, tws, twt); - break; - case OPC_FSUEQ_df: - 
gen_helper_msa_fsueq_df(cpu_env, tdf, twd, tws, twt); - break; - case OPC_FSNE_df: - gen_helper_msa_fsne_df(cpu_env, tdf, twd, tws, twt); - break; - case OPC_FSLT_df: - gen_helper_msa_fslt_df(cpu_env, tdf, twd, tws, twt); - break; - case OPC_FMIN_df: - gen_helper_msa_fmin_df(cpu_env, tdf, twd, tws, twt); - break; - case OPC_MULR_Q_df: - tcg_gen_movi_i32(tdf, df + 1); - gen_helper_msa_mulr_q_df(cpu_env, tdf, twd, tws, twt); - break; - case OPC_FSULT_df: - gen_helper_msa_fsult_df(cpu_env, tdf, twd, tws, twt); - break; - case OPC_FMIN_A_df: - gen_helper_msa_fmin_a_df(cpu_env, tdf, twd, tws, twt); - break; - case OPC_MADDR_Q_df: - tcg_gen_movi_i32(tdf, df + 1); - gen_helper_msa_maddr_q_df(cpu_env, tdf, twd, tws, twt); - break; - case OPC_FSLE_df: - gen_helper_msa_fsle_df(cpu_env, tdf, twd, tws, twt); - break; - case OPC_FMAX_df: - gen_helper_msa_fmax_df(cpu_env, tdf, twd, tws, twt); - break; - case OPC_MSUBR_Q_df: - tcg_gen_movi_i32(tdf, df + 1); - gen_helper_msa_msubr_q_df(cpu_env, tdf, twd, tws, twt); - break; - case OPC_FSULE_df: - gen_helper_msa_fsule_df(cpu_env, tdf, twd, tws, twt); - break; - case OPC_FMAX_A_df: - gen_helper_msa_fmax_a_df(cpu_env, tdf, twd, tws, twt); - break; - default: - MIPS_INVAL("MSA instruction"); - gen_reserved_instruction(ctx); - break; - } - - tcg_temp_free_i32(twd); - tcg_temp_free_i32(tws); - tcg_temp_free_i32(twt); - tcg_temp_free_i32(tdf); -} - -static void gen_msa_2r(DisasContext *ctx) -{ -#define MASK_MSA_2R(op) (MASK_MSA_MINOR(op) | (op & (0x1f << 21)) | \ - (op & (0x7 << 18))) - uint8_t wt = (ctx->opcode >> 16) & 0x1f; - uint8_t ws = (ctx->opcode >> 11) & 0x1f; - uint8_t wd = (ctx->opcode >> 6) & 0x1f; - uint8_t df = (ctx->opcode >> 16) & 0x3; - TCGv_i32 twd = tcg_const_i32(wd); - TCGv_i32 tws = tcg_const_i32(ws); - TCGv_i32 twt = tcg_const_i32(wt); - TCGv_i32 tdf = tcg_const_i32(df); - - switch (MASK_MSA_2R(ctx->opcode)) { - case OPC_FILL_df: -#if !defined(TARGET_MIPS64) - /* Double format valid only for MIPS64 */ - if (df == DF_DOUBLE) { - gen_reserved_instruction(ctx); - break; - } -#endif - gen_helper_msa_fill_df(cpu_env, tdf, twd, tws); /* trs */ - break; - case OPC_NLOC_df: - switch (df) { - case DF_BYTE: - gen_helper_msa_nloc_b(cpu_env, twd, tws); - break; - case DF_HALF: - gen_helper_msa_nloc_h(cpu_env, twd, tws); - break; - case DF_WORD: - gen_helper_msa_nloc_w(cpu_env, twd, tws); - break; - case DF_DOUBLE: - gen_helper_msa_nloc_d(cpu_env, twd, tws); - break; - } - break; - case OPC_NLZC_df: - switch (df) { - case DF_BYTE: - gen_helper_msa_nlzc_b(cpu_env, twd, tws); - break; - case DF_HALF: - gen_helper_msa_nlzc_h(cpu_env, twd, tws); - break; - case DF_WORD: - gen_helper_msa_nlzc_w(cpu_env, twd, tws); - break; - case DF_DOUBLE: - gen_helper_msa_nlzc_d(cpu_env, twd, tws); - break; - } - break; - case OPC_PCNT_df: - switch (df) { - case DF_BYTE: - gen_helper_msa_pcnt_b(cpu_env, twd, tws); - break; - case DF_HALF: - gen_helper_msa_pcnt_h(cpu_env, twd, tws); - break; - case DF_WORD: - gen_helper_msa_pcnt_w(cpu_env, twd, tws); - break; - case DF_DOUBLE: - gen_helper_msa_pcnt_d(cpu_env, twd, tws); - break; - } - break; - default: - MIPS_INVAL("MSA instruction"); - gen_reserved_instruction(ctx); - break; - } - - tcg_temp_free_i32(twd); - tcg_temp_free_i32(tws); - tcg_temp_free_i32(twt); - tcg_temp_free_i32(tdf); -} - -static void gen_msa_2rf(DisasContext *ctx) -{ -#define MASK_MSA_2RF(op) (MASK_MSA_MINOR(op) | (op & (0x1f << 21)) | \ - (op & (0xf << 17))) - uint8_t wt = (ctx->opcode >> 16) & 0x1f; - uint8_t ws = (ctx->opcode >> 11) & 0x1f; - uint8_t wd 
= (ctx->opcode >> 6) & 0x1f; - uint8_t df = (ctx->opcode >> 16) & 0x1; - TCGv_i32 twd = tcg_const_i32(wd); - TCGv_i32 tws = tcg_const_i32(ws); - TCGv_i32 twt = tcg_const_i32(wt); - /* adjust df value for floating-point instruction */ - TCGv_i32 tdf = tcg_const_i32(df + 2); - - switch (MASK_MSA_2RF(ctx->opcode)) { - case OPC_FCLASS_df: - gen_helper_msa_fclass_df(cpu_env, tdf, twd, tws); - break; - case OPC_FTRUNC_S_df: - gen_helper_msa_ftrunc_s_df(cpu_env, tdf, twd, tws); - break; - case OPC_FTRUNC_U_df: - gen_helper_msa_ftrunc_u_df(cpu_env, tdf, twd, tws); - break; - case OPC_FSQRT_df: - gen_helper_msa_fsqrt_df(cpu_env, tdf, twd, tws); - break; - case OPC_FRSQRT_df: - gen_helper_msa_frsqrt_df(cpu_env, tdf, twd, tws); - break; - case OPC_FRCP_df: - gen_helper_msa_frcp_df(cpu_env, tdf, twd, tws); - break; - case OPC_FRINT_df: - gen_helper_msa_frint_df(cpu_env, tdf, twd, tws); - break; - case OPC_FLOG2_df: - gen_helper_msa_flog2_df(cpu_env, tdf, twd, tws); - break; - case OPC_FEXUPL_df: - gen_helper_msa_fexupl_df(cpu_env, tdf, twd, tws); - break; - case OPC_FEXUPR_df: - gen_helper_msa_fexupr_df(cpu_env, tdf, twd, tws); - break; - case OPC_FFQL_df: - gen_helper_msa_ffql_df(cpu_env, tdf, twd, tws); - break; - case OPC_FFQR_df: - gen_helper_msa_ffqr_df(cpu_env, tdf, twd, tws); - break; - case OPC_FTINT_S_df: - gen_helper_msa_ftint_s_df(cpu_env, tdf, twd, tws); - break; - case OPC_FTINT_U_df: - gen_helper_msa_ftint_u_df(cpu_env, tdf, twd, tws); - break; - case OPC_FFINT_S_df: - gen_helper_msa_ffint_s_df(cpu_env, tdf, twd, tws); - break; - case OPC_FFINT_U_df: - gen_helper_msa_ffint_u_df(cpu_env, tdf, twd, tws); - break; - } - - tcg_temp_free_i32(twd); - tcg_temp_free_i32(tws); - tcg_temp_free_i32(twt); - tcg_temp_free_i32(tdf); -} - -static void gen_msa_vec_v(DisasContext *ctx) -{ -#define MASK_MSA_VEC(op) (MASK_MSA_MINOR(op) | (op & (0x1f << 21))) - uint8_t wt = (ctx->opcode >> 16) & 0x1f; - uint8_t ws = (ctx->opcode >> 11) & 0x1f; - uint8_t wd = (ctx->opcode >> 6) & 0x1f; - TCGv_i32 twd = tcg_const_i32(wd); - TCGv_i32 tws = tcg_const_i32(ws); - TCGv_i32 twt = tcg_const_i32(wt); - - switch (MASK_MSA_VEC(ctx->opcode)) { - case OPC_AND_V: - gen_helper_msa_and_v(cpu_env, twd, tws, twt); - break; - case OPC_OR_V: - gen_helper_msa_or_v(cpu_env, twd, tws, twt); - break; - case OPC_NOR_V: - gen_helper_msa_nor_v(cpu_env, twd, tws, twt); - break; - case OPC_XOR_V: - gen_helper_msa_xor_v(cpu_env, twd, tws, twt); - break; - case OPC_BMNZ_V: - gen_helper_msa_bmnz_v(cpu_env, twd, tws, twt); - break; - case OPC_BMZ_V: - gen_helper_msa_bmz_v(cpu_env, twd, tws, twt); - break; - case OPC_BSEL_V: - gen_helper_msa_bsel_v(cpu_env, twd, tws, twt); - break; - default: - MIPS_INVAL("MSA instruction"); - gen_reserved_instruction(ctx); - break; - } - - tcg_temp_free_i32(twd); - tcg_temp_free_i32(tws); - tcg_temp_free_i32(twt); -} - -static void gen_msa_vec(DisasContext *ctx) -{ - switch (MASK_MSA_VEC(ctx->opcode)) { - case OPC_AND_V: - case OPC_OR_V: - case OPC_NOR_V: - case OPC_XOR_V: - case OPC_BMNZ_V: - case OPC_BMZ_V: - case OPC_BSEL_V: - gen_msa_vec_v(ctx); - break; - case OPC_MSA_2R: - gen_msa_2r(ctx); - break; - case OPC_MSA_2RF: - gen_msa_2rf(ctx); - break; - default: - MIPS_INVAL("MSA instruction"); - gen_reserved_instruction(ctx); - break; - } -} - -static bool trans_MSA(DisasContext *ctx, arg_MSA *a) -{ - uint32_t opcode = ctx->opcode; - - check_msa_access(ctx); - - switch (MASK_MSA_MINOR(opcode)) { - case OPC_MSA_I8_00: - case OPC_MSA_I8_01: - case OPC_MSA_I8_02: - gen_msa_i8(ctx); - break; - case 
OPC_MSA_I5_06: - case OPC_MSA_I5_07: - gen_msa_i5(ctx); - break; - case OPC_MSA_BIT_09: - case OPC_MSA_BIT_0A: - gen_msa_bit(ctx); - break; - case OPC_MSA_3R_0D: - case OPC_MSA_3R_0E: - case OPC_MSA_3R_0F: - case OPC_MSA_3R_10: - case OPC_MSA_3R_11: - case OPC_MSA_3R_12: - case OPC_MSA_3R_13: - case OPC_MSA_3R_14: - case OPC_MSA_3R_15: - gen_msa_3r(ctx); - break; - case OPC_MSA_ELM: - gen_msa_elm(ctx); - break; - case OPC_MSA_3RF_1A: - case OPC_MSA_3RF_1B: - case OPC_MSA_3RF_1C: - gen_msa_3rf(ctx); - break; - case OPC_MSA_VEC: - gen_msa_vec(ctx); - break; - case OPC_LD_B: - case OPC_LD_H: - case OPC_LD_W: - case OPC_LD_D: - case OPC_ST_B: - case OPC_ST_H: - case OPC_ST_W: - case OPC_ST_D: - { - int32_t s10 = sextract32(ctx->opcode, 16, 10); - uint8_t rs = (ctx->opcode >> 11) & 0x1f; - uint8_t wd = (ctx->opcode >> 6) & 0x1f; - uint8_t df = (ctx->opcode >> 0) & 0x3; - - TCGv_i32 twd = tcg_const_i32(wd); - TCGv taddr = tcg_temp_new(); - gen_base_offset_addr(ctx, taddr, rs, s10 << df); - - switch (MASK_MSA_MINOR(opcode)) { - case OPC_LD_B: - gen_helper_msa_ld_b(cpu_env, twd, taddr); - break; - case OPC_LD_H: - gen_helper_msa_ld_h(cpu_env, twd, taddr); - break; - case OPC_LD_W: - gen_helper_msa_ld_w(cpu_env, twd, taddr); - break; - case OPC_LD_D: - gen_helper_msa_ld_d(cpu_env, twd, taddr); - break; - case OPC_ST_B: - gen_helper_msa_st_b(cpu_env, twd, taddr); - break; - case OPC_ST_H: - gen_helper_msa_st_h(cpu_env, twd, taddr); - break; - case OPC_ST_W: - gen_helper_msa_st_w(cpu_env, twd, taddr); - break; - case OPC_ST_D: - gen_helper_msa_st_d(cpu_env, twd, taddr); - break; - } - - tcg_temp_free_i32(twd); - tcg_temp_free(taddr); - } - break; - default: - MIPS_INVAL("MSA instruction"); - gen_reserved_instruction(ctx); - break; - } + gen_msa_i8(cpu_env, + tcg_constant_i32(a->wd), + tcg_constant_i32(a->ws), + tcg_constant_i32(a->sa)); return true; } +TRANS(ANDI, trans_msa_i8, gen_helper_msa_andi_b); +TRANS(ORI, trans_msa_i8, gen_helper_msa_ori_b); +TRANS(NORI, trans_msa_i8, gen_helper_msa_nori_b); +TRANS(XORI, trans_msa_i8, gen_helper_msa_xori_b); +TRANS(BMNZI, trans_msa_i8, gen_helper_msa_bmnzi_b); +TRANS(BMZI, trans_msa_i8, gen_helper_msa_bmzi_b); +TRANS(BSELI, trans_msa_i8, gen_helper_msa_bseli_b); + +static bool trans_SHF(DisasContext *ctx, arg_msa_i *a) +{ + if (a->df == DF_DOUBLE) { + return false; + } + + if (!check_msa_enabled(ctx)) { + return true; + } + + gen_helper_msa_shf_df(cpu_env, + tcg_constant_i32(a->df), + tcg_constant_i32(a->wd), + tcg_constant_i32(a->ws), + tcg_constant_i32(a->sa)); + + return true; +} + +static bool trans_msa_i5(DisasContext *ctx, arg_msa_i *a, + gen_helper_piiii *gen_msa_i5) +{ + if (!check_msa_enabled(ctx)) { + return true; + } + + gen_msa_i5(cpu_env, + tcg_constant_i32(a->df), + tcg_constant_i32(a->wd), + tcg_constant_i32(a->ws), + tcg_constant_i32(a->sa)); + + return true; +} + +TRANS(ADDVI, trans_msa_i5, gen_helper_msa_addvi_df); +TRANS(SUBVI, trans_msa_i5, gen_helper_msa_subvi_df); +TRANS(MAXI_S, trans_msa_i5, gen_helper_msa_maxi_s_df); +TRANS(MAXI_U, trans_msa_i5, gen_helper_msa_maxi_u_df); +TRANS(MINI_S, trans_msa_i5, gen_helper_msa_mini_s_df); +TRANS(MINI_U, trans_msa_i5, gen_helper_msa_mini_u_df); +TRANS(CLTI_S, trans_msa_i5, gen_helper_msa_clti_s_df); +TRANS(CLTI_U, trans_msa_i5, gen_helper_msa_clti_u_df); +TRANS(CLEI_S, trans_msa_i5, gen_helper_msa_clei_s_df); +TRANS(CLEI_U, trans_msa_i5, gen_helper_msa_clei_u_df); +TRANS(CEQI, trans_msa_i5, gen_helper_msa_ceqi_df); + +static bool trans_LDI(DisasContext *ctx, arg_msa_ldi *a) +{ + if 
(!check_msa_enabled(ctx)) { + return true; + } + + gen_helper_msa_ldi_df(cpu_env, + tcg_constant_i32(a->df), + tcg_constant_i32(a->wd), + tcg_constant_i32(a->sa)); + + return true; +} + +static bool trans_msa_bit(DisasContext *ctx, arg_msa_bit *a, + gen_helper_piiii *gen_msa_bit) +{ + if (a->df < 0) { + return false; + } + + if (!check_msa_enabled(ctx)) { + return true; + } + + gen_msa_bit(cpu_env, + tcg_constant_i32(a->df), + tcg_constant_i32(a->wd), + tcg_constant_i32(a->ws), + tcg_constant_i32(a->m)); + + return true; +} + +TRANS(SLLI, trans_msa_bit, gen_helper_msa_slli_df); +TRANS(SRAI, trans_msa_bit, gen_helper_msa_srai_df); +TRANS(SRLI, trans_msa_bit, gen_helper_msa_srli_df); +TRANS(BCLRI, trans_msa_bit, gen_helper_msa_bclri_df); +TRANS(BSETI, trans_msa_bit, gen_helper_msa_bseti_df); +TRANS(BNEGI, trans_msa_bit, gen_helper_msa_bnegi_df); +TRANS(BINSLI, trans_msa_bit, gen_helper_msa_binsli_df); +TRANS(BINSRI, trans_msa_bit, gen_helper_msa_binsri_df); +TRANS(SAT_S, trans_msa_bit, gen_helper_msa_sat_s_df); +TRANS(SAT_U, trans_msa_bit, gen_helper_msa_sat_u_df); +TRANS(SRARI, trans_msa_bit, gen_helper_msa_srari_df); +TRANS(SRLRI, trans_msa_bit, gen_helper_msa_srlri_df); + +static bool trans_msa_3rf(DisasContext *ctx, arg_msa_r *a, + gen_helper_piiii *gen_msa_3rf) +{ + if (!check_msa_enabled(ctx)) { + return true; + } + + gen_msa_3rf(cpu_env, + tcg_constant_i32(a->df), + tcg_constant_i32(a->wd), + tcg_constant_i32(a->ws), + tcg_constant_i32(a->wt)); + + return true; +} + +static bool trans_msa_3r(DisasContext *ctx, arg_msa_r *a, + gen_helper_piii *gen_msa_3r) +{ + if (!gen_msa_3r) { + return false; + } + + if (!check_msa_enabled(ctx)) { + return true; + } + + gen_msa_3r(cpu_env, + tcg_constant_i32(a->wd), + tcg_constant_i32(a->ws), + tcg_constant_i32(a->wt)); + + return true; +} + +TRANS(AND_V, trans_msa_3r, gen_helper_msa_and_v); +TRANS(OR_V, trans_msa_3r, gen_helper_msa_or_v); +TRANS(NOR_V, trans_msa_3r, gen_helper_msa_nor_v); +TRANS(XOR_V, trans_msa_3r, gen_helper_msa_xor_v); +TRANS(BMNZ_V, trans_msa_3r, gen_helper_msa_bmnz_v); +TRANS(BMZ_V, trans_msa_3r, gen_helper_msa_bmz_v); +TRANS(BSEL_V, trans_msa_3r, gen_helper_msa_bsel_v); + +TRANS_DF_iii(SLL, trans_msa_3r, gen_helper_msa_sll); +TRANS_DF_iii(SRA, trans_msa_3r, gen_helper_msa_sra); +TRANS_DF_iii(SRL, trans_msa_3r, gen_helper_msa_srl); +TRANS_DF_iii(BCLR, trans_msa_3r, gen_helper_msa_bclr); +TRANS_DF_iii(BSET, trans_msa_3r, gen_helper_msa_bset); +TRANS_DF_iii(BNEG, trans_msa_3r, gen_helper_msa_bneg); +TRANS_DF_iii(BINSL, trans_msa_3r, gen_helper_msa_binsl); +TRANS_DF_iii(BINSR, trans_msa_3r, gen_helper_msa_binsr); + +TRANS_DF_iii(ADDV, trans_msa_3r, gen_helper_msa_addv); +TRANS_DF_iii(SUBV, trans_msa_3r, gen_helper_msa_subv); +TRANS_DF_iii(MAX_S, trans_msa_3r, gen_helper_msa_max_s); +TRANS_DF_iii(MAX_U, trans_msa_3r, gen_helper_msa_max_u); +TRANS_DF_iii(MIN_S, trans_msa_3r, gen_helper_msa_min_s); +TRANS_DF_iii(MIN_U, trans_msa_3r, gen_helper_msa_min_u); +TRANS_DF_iii(MAX_A, trans_msa_3r, gen_helper_msa_max_a); +TRANS_DF_iii(MIN_A, trans_msa_3r, gen_helper_msa_min_a); + +TRANS_DF_iii(CEQ, trans_msa_3r, gen_helper_msa_ceq); +TRANS_DF_iii(CLT_S, trans_msa_3r, gen_helper_msa_clt_s); +TRANS_DF_iii(CLT_U, trans_msa_3r, gen_helper_msa_clt_u); +TRANS_DF_iii(CLE_S, trans_msa_3r, gen_helper_msa_cle_s); +TRANS_DF_iii(CLE_U, trans_msa_3r, gen_helper_msa_cle_u); + +TRANS_DF_iii(ADD_A, trans_msa_3r, gen_helper_msa_add_a); +TRANS_DF_iii(ADDS_A, trans_msa_3r, gen_helper_msa_adds_a); +TRANS_DF_iii(ADDS_S, trans_msa_3r, gen_helper_msa_adds_s);
+TRANS_DF_iii(ADDS_U, trans_msa_3r, gen_helper_msa_adds_u); +TRANS_DF_iii(AVE_S, trans_msa_3r, gen_helper_msa_ave_s); +TRANS_DF_iii(AVE_U, trans_msa_3r, gen_helper_msa_ave_u); +TRANS_DF_iii(AVER_S, trans_msa_3r, gen_helper_msa_aver_s); +TRANS_DF_iii(AVER_U, trans_msa_3r, gen_helper_msa_aver_u); + +TRANS_DF_iii(SUBS_S, trans_msa_3r, gen_helper_msa_subs_s); +TRANS_DF_iii(SUBS_U, trans_msa_3r, gen_helper_msa_subs_u); +TRANS_DF_iii(SUBSUS_U, trans_msa_3r, gen_helper_msa_subsus_u); +TRANS_DF_iii(SUBSUU_S, trans_msa_3r, gen_helper_msa_subsuu_s); +TRANS_DF_iii(ASUB_S, trans_msa_3r, gen_helper_msa_asub_s); +TRANS_DF_iii(ASUB_U, trans_msa_3r, gen_helper_msa_asub_u); + +TRANS_DF_iii(MULV, trans_msa_3r, gen_helper_msa_mulv); +TRANS_DF_iii(MADDV, trans_msa_3r, gen_helper_msa_maddv); +TRANS_DF_iii(MSUBV, trans_msa_3r, gen_helper_msa_msubv); +TRANS_DF_iii(DIV_S, trans_msa_3r, gen_helper_msa_div_s); +TRANS_DF_iii(DIV_U, trans_msa_3r, gen_helper_msa_div_u); +TRANS_DF_iii(MOD_S, trans_msa_3r, gen_helper_msa_mod_s); +TRANS_DF_iii(MOD_U, trans_msa_3r, gen_helper_msa_mod_u); + +TRANS_DF_iii_b(DOTP_S, trans_msa_3r, gen_helper_msa_dotp_s); +TRANS_DF_iii_b(DOTP_U, trans_msa_3r, gen_helper_msa_dotp_u); +TRANS_DF_iii_b(DPADD_S, trans_msa_3r, gen_helper_msa_dpadd_s); +TRANS_DF_iii_b(DPADD_U, trans_msa_3r, gen_helper_msa_dpadd_u); +TRANS_DF_iii_b(DPSUB_S, trans_msa_3r, gen_helper_msa_dpsub_s); +TRANS_DF_iii_b(DPSUB_U, trans_msa_3r, gen_helper_msa_dpsub_u); + +TRANS(SLD, trans_msa_3rf, gen_helper_msa_sld_df); +TRANS(SPLAT, trans_msa_3rf, gen_helper_msa_splat_df); +TRANS_DF_iii(PCKEV, trans_msa_3r, gen_helper_msa_pckev); +TRANS_DF_iii(PCKOD, trans_msa_3r, gen_helper_msa_pckod); +TRANS_DF_iii(ILVL, trans_msa_3r, gen_helper_msa_ilvl); +TRANS_DF_iii(ILVR, trans_msa_3r, gen_helper_msa_ilvr); +TRANS_DF_iii(ILVEV, trans_msa_3r, gen_helper_msa_ilvev); +TRANS_DF_iii(ILVOD, trans_msa_3r, gen_helper_msa_ilvod); + +TRANS(VSHF, trans_msa_3rf, gen_helper_msa_vshf_df); +TRANS_DF_iii(SRAR, trans_msa_3r, gen_helper_msa_srar); +TRANS_DF_iii(SRLR, trans_msa_3r, gen_helper_msa_srlr); +TRANS_DF_iii_b(HADD_S, trans_msa_3r, gen_helper_msa_hadd_s); +TRANS_DF_iii_b(HADD_U, trans_msa_3r, gen_helper_msa_hadd_u); +TRANS_DF_iii_b(HSUB_S, trans_msa_3r, gen_helper_msa_hsub_s); +TRANS_DF_iii_b(HSUB_U, trans_msa_3r, gen_helper_msa_hsub_u); + +static bool trans_MOVE_V(DisasContext *ctx, arg_msa_elm *a) +{ + if (!check_msa_enabled(ctx)) { + return true; + } + + gen_helper_msa_move_v(cpu_env, + tcg_constant_i32(a->wd), + tcg_constant_i32(a->ws)); + + return true; +} + +static bool trans_CTCMSA(DisasContext *ctx, arg_msa_elm *a) +{ + TCGv telm; + + if (!check_msa_enabled(ctx)) { + return true; + } + + telm = tcg_temp_new(); + + gen_load_gpr(telm, a->ws); + gen_helper_msa_ctcmsa(cpu_env, telm, tcg_constant_i32(a->wd)); + + tcg_temp_free(telm); + + return true; +} + +static bool trans_CFCMSA(DisasContext *ctx, arg_msa_elm *a) +{ + TCGv telm; + + if (!check_msa_enabled(ctx)) { + return true; + } + + telm = tcg_temp_new(); + + gen_helper_msa_cfcmsa(telm, cpu_env, tcg_constant_i32(a->ws)); + gen_store_gpr(telm, a->wd); + + tcg_temp_free(telm); + + return true; +} + +static bool trans_msa_elm(DisasContext *ctx, arg_msa_elm_df *a, + gen_helper_piiii *gen_msa_elm_df) +{ + if (a->df < 0) { + return false; + } + + if (!check_msa_enabled(ctx)) { + return true; + } + + gen_msa_elm_df(cpu_env, + tcg_constant_i32(a->df), + tcg_constant_i32(a->wd), + tcg_constant_i32(a->ws), + tcg_constant_i32(a->n)); + + return true; +} + +TRANS(SLDI, trans_msa_elm, 
gen_helper_msa_sldi_df); +TRANS(SPLATI, trans_msa_elm, gen_helper_msa_splati_df); +TRANS(INSVE, trans_msa_elm, gen_helper_msa_insve_df); + +static bool trans_msa_elm_fn(DisasContext *ctx, arg_msa_elm_df *a, + gen_helper_piii * const gen_msa_elm[4]) +{ + if (a->df < 0 || !gen_msa_elm[a->df]) { + return false; + } + + if (!check_msa_enabled(ctx)) { + return true; + } + + if (a->wd == 0) { + /* Treat as NOP. */ + return true; + } + + gen_msa_elm[a->df](cpu_env, + tcg_constant_i32(a->wd), + tcg_constant_i32(a->ws), + tcg_constant_i32(a->n)); + + return true; +} + +#if defined(TARGET_MIPS64) +#define NULL_IF_MIPS32(function) function +#else +#define NULL_IF_MIPS32(function) NULL +#endif + +static bool trans_COPY_U(DisasContext *ctx, arg_msa_elm_df *a) +{ + static gen_helper_piii * const gen_msa_copy_u[4] = { + gen_helper_msa_copy_u_b, gen_helper_msa_copy_u_h, + NULL_IF_MIPS32(gen_helper_msa_copy_u_w), NULL + }; + + return trans_msa_elm_fn(ctx, a, gen_msa_copy_u); +} + +static bool trans_COPY_S(DisasContext *ctx, arg_msa_elm_df *a) +{ + static gen_helper_piii * const gen_msa_copy_s[4] = { + gen_helper_msa_copy_s_b, gen_helper_msa_copy_s_h, + gen_helper_msa_copy_s_w, NULL_IF_MIPS32(gen_helper_msa_copy_s_d) + }; + + return trans_msa_elm_fn(ctx, a, gen_msa_copy_s); +} + +static bool trans_INSERT(DisasContext *ctx, arg_msa_elm_df *a) +{ + static gen_helper_piii * const gen_msa_insert[4] = { + gen_helper_msa_insert_b, gen_helper_msa_insert_h, + gen_helper_msa_insert_w, NULL_IF_MIPS32(gen_helper_msa_insert_d) + }; + + return trans_msa_elm_fn(ctx, a, gen_msa_insert); +} + +TRANS(FCAF, trans_msa_3rf, gen_helper_msa_fcaf_df); +TRANS(FCUN, trans_msa_3rf, gen_helper_msa_fcun_df); +TRANS(FCEQ, trans_msa_3rf, gen_helper_msa_fceq_df); +TRANS(FCUEQ, trans_msa_3rf, gen_helper_msa_fcueq_df); +TRANS(FCLT, trans_msa_3rf, gen_helper_msa_fclt_df); +TRANS(FCULT, trans_msa_3rf, gen_helper_msa_fcult_df); +TRANS(FCLE, trans_msa_3rf, gen_helper_msa_fcle_df); +TRANS(FCULE, trans_msa_3rf, gen_helper_msa_fcule_df); +TRANS(FSAF, trans_msa_3rf, gen_helper_msa_fsaf_df); +TRANS(FSUN, trans_msa_3rf, gen_helper_msa_fsun_df); +TRANS(FSEQ, trans_msa_3rf, gen_helper_msa_fseq_df); +TRANS(FSUEQ, trans_msa_3rf, gen_helper_msa_fsueq_df); +TRANS(FSLT, trans_msa_3rf, gen_helper_msa_fslt_df); +TRANS(FSULT, trans_msa_3rf, gen_helper_msa_fsult_df); +TRANS(FSLE, trans_msa_3rf, gen_helper_msa_fsle_df); +TRANS(FSULE, trans_msa_3rf, gen_helper_msa_fsule_df); + +TRANS(FADD, trans_msa_3rf, gen_helper_msa_fadd_df); +TRANS(FSUB, trans_msa_3rf, gen_helper_msa_fsub_df); +TRANS(FMUL, trans_msa_3rf, gen_helper_msa_fmul_df); +TRANS(FDIV, trans_msa_3rf, gen_helper_msa_fdiv_df); +TRANS(FMADD, trans_msa_3rf, gen_helper_msa_fmadd_df); +TRANS(FMSUB, trans_msa_3rf, gen_helper_msa_fmsub_df); +TRANS(FEXP2, trans_msa_3rf, gen_helper_msa_fexp2_df); +TRANS(FEXDO, trans_msa_3rf, gen_helper_msa_fexdo_df); +TRANS(FTQ, trans_msa_3rf, gen_helper_msa_ftq_df); +TRANS(FMIN, trans_msa_3rf, gen_helper_msa_fmin_df); +TRANS(FMIN_A, trans_msa_3rf, gen_helper_msa_fmin_a_df); +TRANS(FMAX, trans_msa_3rf, gen_helper_msa_fmax_df); +TRANS(FMAX_A, trans_msa_3rf, gen_helper_msa_fmax_a_df); + +TRANS(FCOR, trans_msa_3rf, gen_helper_msa_fcor_df); +TRANS(FCUNE, trans_msa_3rf, gen_helper_msa_fcune_df); +TRANS(FCNE, trans_msa_3rf, gen_helper_msa_fcne_df); +TRANS(MUL_Q, trans_msa_3rf, gen_helper_msa_mul_q_df); +TRANS(MADD_Q, trans_msa_3rf, gen_helper_msa_madd_q_df); +TRANS(MSUB_Q, trans_msa_3rf, gen_helper_msa_msub_q_df); +TRANS(FSOR, trans_msa_3rf, gen_helper_msa_fsor_df); +TRANS(FSUNE,
trans_msa_3rf, gen_helper_msa_fsune_df); +TRANS(FSNE, trans_msa_3rf, gen_helper_msa_fsne_df); +TRANS(MULR_Q, trans_msa_3rf, gen_helper_msa_mulr_q_df); +TRANS(MADDR_Q, trans_msa_3rf, gen_helper_msa_maddr_q_df); +TRANS(MSUBR_Q, trans_msa_3rf, gen_helper_msa_msubr_q_df); + +static bool trans_msa_2r(DisasContext *ctx, arg_msa_r *a, + gen_helper_pii *gen_msa_2r) +{ + if (!check_msa_enabled(ctx)) { + return true; + } + + gen_msa_2r(cpu_env, tcg_constant_i32(a->wd), tcg_constant_i32(a->ws)); + + return true; +} + +TRANS_DF_ii(PCNT, trans_msa_2r, gen_helper_msa_pcnt); +TRANS_DF_ii(NLOC, trans_msa_2r, gen_helper_msa_nloc); +TRANS_DF_ii(NLZC, trans_msa_2r, gen_helper_msa_nlzc); + +static bool trans_FILL(DisasContext *ctx, arg_msa_r *a) +{ + if (TARGET_LONG_BITS != 64 && a->df == DF_DOUBLE) { + /* Double format valid only for MIPS64 */ + return false; + } + + if (!check_msa_enabled(ctx)) { + return true; + } + + gen_helper_msa_fill_df(cpu_env, + tcg_constant_i32(a->df), + tcg_constant_i32(a->wd), + tcg_constant_i32(a->ws)); + + return true; +} + +static bool trans_msa_2rf(DisasContext *ctx, arg_msa_r *a, + gen_helper_piii *gen_msa_2rf) +{ + if (!check_msa_enabled(ctx)) { + return true; + } + + gen_msa_2rf(cpu_env, + tcg_constant_i32(a->df), + tcg_constant_i32(a->wd), + tcg_constant_i32(a->ws)); + + return true; +} + +TRANS(FCLASS, trans_msa_2rf, gen_helper_msa_fclass_df); +TRANS(FTRUNC_S, trans_msa_2rf, gen_helper_msa_ftrunc_s_df); +TRANS(FTRUNC_U, trans_msa_2rf, gen_helper_msa_ftrunc_u_df); +TRANS(FSQRT, trans_msa_2rf, gen_helper_msa_fsqrt_df); +TRANS(FRSQRT, trans_msa_2rf, gen_helper_msa_frsqrt_df); +TRANS(FRCP, trans_msa_2rf, gen_helper_msa_frcp_df); +TRANS(FRINT, trans_msa_2rf, gen_helper_msa_frint_df); +TRANS(FLOG2, trans_msa_2rf, gen_helper_msa_flog2_df); +TRANS(FEXUPL, trans_msa_2rf, gen_helper_msa_fexupl_df); +TRANS(FEXUPR, trans_msa_2rf, gen_helper_msa_fexupr_df); +TRANS(FFQL, trans_msa_2rf, gen_helper_msa_ffql_df); +TRANS(FFQR, trans_msa_2rf, gen_helper_msa_ffqr_df); +TRANS(FTINT_S, trans_msa_2rf, gen_helper_msa_ftint_s_df); +TRANS(FTINT_U, trans_msa_2rf, gen_helper_msa_ftint_u_df); +TRANS(FFINT_S, trans_msa_2rf, gen_helper_msa_ffint_s_df); +TRANS(FFINT_U, trans_msa_2rf, gen_helper_msa_ffint_u_df); + +static bool trans_msa_ldst(DisasContext *ctx, arg_msa_i *a, + gen_helper_piv *gen_msa_ldst) +{ + TCGv taddr; + + if (!check_msa_enabled(ctx)) { + return true; + } + + taddr = tcg_temp_new(); + + gen_base_offset_addr(ctx, taddr, a->ws, a->sa << a->df); + gen_msa_ldst(cpu_env, tcg_constant_i32(a->wd), taddr); + + tcg_temp_free(taddr); + + return true; +} + +TRANS_DF_iv(LD, trans_msa_ldst, gen_helper_msa_ld); +TRANS_DF_iv(ST, trans_msa_ldst, gen_helper_msa_st); + static bool trans_LSA(DisasContext *ctx, arg_r *a) { return gen_lsa(ctx, a->rd, a->rt, a->rs, a->sa); diff --git a/target/mips/tcg/nanomips_translate.c.inc b/target/mips/tcg/nanomips_translate.c.inc index ccbcecad09..2c022a49f2 100644 --- a/target/mips/tcg/nanomips_translate.c.inc +++ b/target/mips/tcg/nanomips_translate.c.inc @@ -1868,6 +1868,9 @@ static void gen_pool32axf_2_nanomips_insn(DisasContext *ctx, uint32_t opc, TCGv_i32 t2 = tcg_temp_new_i32(); TCGv_i32 t3 = tcg_temp_new_i32(); + if (acc || ctx->insn_flags & ISA_MIPS_R6) { + check_dsp_r2(ctx); + } gen_load_gpr(t0, rs); gen_load_gpr(t1, rt); tcg_gen_trunc_tl_i32(t2, t0); @@ -1925,6 +1928,9 @@ static void gen_pool32axf_2_nanomips_insn(DisasContext *ctx, uint32_t opc, TCGv_i32 t2 = tcg_temp_new_i32(); TCGv_i32 t3 = tcg_temp_new_i32(); + if (acc || ctx->insn_flags & ISA_MIPS_R6) { +
check_dsp_r2(ctx); + } gen_load_gpr(t0, rs); gen_load_gpr(t1, rt); tcg_gen_trunc_tl_i32(t2, t0); diff --git a/target/mips/tcg/tcg-internal.h b/target/mips/tcg/tcg-internal.h index bad3deb611..466768aec4 100644 --- a/target/mips/tcg/tcg-internal.h +++ b/target/mips/tcg/tcg-internal.h @@ -18,9 +18,6 @@ void mips_tcg_init(void); void mips_cpu_synchronize_from_tb(CPUState *cs, const TranslationBlock *tb); -bool mips_cpu_tlb_fill(CPUState *cs, vaddr address, int size, - MMUAccessType access_type, int mmu_idx, - bool probe, uintptr_t retaddr); void mips_cpu_do_unaligned_access(CPUState *cpu, vaddr addr, MMUAccessType access_type, int mmu_idx, uintptr_t retaddr) QEMU_NORETURN; @@ -60,6 +57,10 @@ void mips_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr, MemTxResult response, uintptr_t retaddr); void cpu_mips_tlb_flush(CPUMIPSState *env); +bool mips_cpu_tlb_fill(CPUState *cs, vaddr address, int size, + MMUAccessType access_type, int mmu_idx, + bool probe, uintptr_t retaddr); + #endif /* !CONFIG_USER_ONLY */ #endif diff --git a/target/mips/tcg/translate.c b/target/mips/tcg/translate.c index 148afec9dc..47db35d7dd 100644 --- a/target/mips/tcg/translate.c +++ b/target/mips/tcg/translate.c @@ -4823,12 +4823,7 @@ static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest) tcg_gen_exit_tb(ctx->base.tb, n); } else { gen_save_pc(dest); - if (ctx->base.singlestep_enabled) { - save_cpu_state(ctx, 0); - gen_helper_raise_exception_debug(cpu_env); - } else { - tcg_gen_lookup_and_goto_ptr(); - } + tcg_gen_lookup_and_goto_ptr(); } } @@ -11788,10 +11783,6 @@ static void gen_branch(DisasContext *ctx, int insn_bytes) } else { tcg_gen_mov_tl(cpu_PC, btarget); } - if (ctx->base.singlestep_enabled) { - save_cpu_state(ctx, 0); - gen_helper_raise_exception_debug(cpu_env); - } tcg_gen_lookup_and_goto_ptr(); break; default: @@ -11866,13 +11857,11 @@ static void gen_compute_compact_branch(DisasContext *ctx, uint32_t opc, } else { /* OPC_JIC, OPC_JIALC */ TCGv tbase = tcg_temp_new(); - TCGv toffset = tcg_temp_new(); + TCGv toffset = tcg_constant_tl(offset); gen_load_gpr(tbase, rt); - tcg_gen_movi_tl(toffset, offset); gen_op_addr_add(ctx, btarget, tbase, toffset); tcg_temp_free(tbase); - tcg_temp_free(toffset); } break; default: @@ -13627,7 +13616,6 @@ static void gen_mipsdsp_accinsn(DisasContext *ctx, uint32_t op1, uint32_t op2, TCGv t0; TCGv t1; TCGv v1_t; - TCGv v2_t; int16_t imm; if ((ret == 0) && (check_ret == 1)) { @@ -13638,10 +13626,8 @@ static void gen_mipsdsp_accinsn(DisasContext *ctx, uint32_t op1, uint32_t op2, t0 = tcg_temp_new(); t1 = tcg_temp_new(); v1_t = tcg_temp_new(); - v2_t = tcg_temp_new(); gen_load_gpr(v1_t, v1); - gen_load_gpr(v2_t, v2); switch (op1) { case OPC_EXTR_W_DSP: @@ -13807,8 +13793,7 @@ static void gen_mipsdsp_accinsn(DisasContext *ctx, uint32_t op1, uint32_t op2, break; case OPC_DEXTRV_S_H: tcg_gen_movi_tl(t0, v2); - tcg_gen_movi_tl(t1, v1); - gen_helper_dextr_s_h(cpu_gpr[ret], t0, t1, cpu_env); + gen_helper_dextr_s_h(cpu_gpr[ret], t0, v1_t, cpu_env); break; case OPC_DEXTRV_L: tcg_gen_movi_tl(t0, v2); @@ -13842,7 +13827,6 @@ static void gen_mipsdsp_accinsn(DisasContext *ctx, uint32_t op1, uint32_t op2, tcg_temp_free(t0); tcg_temp_free(t1); tcg_temp_free(v1_t); - tcg_temp_free(v2_t); } /* End MIPSDSP functions. */ @@ -16016,6 +16000,16 @@ static void mips_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs) ctx->default_tcg_memop_mask = (ctx->insn_flags & (ISA_MIPS_R6 | INSN_LOONGSON3A)) ? 
MO_UNALN : MO_ALIGN; + /* + * Execute a branch and its delay slot as a single instruction. + * This is what GDB expects and is consistent with what the + * hardware does (e.g. if a delay slot instruction faults, the + * reported PC is the PC of the branch). + */ + if (ctx->base.singlestep_enabled && (ctx->hflags & MIPS_HFLAG_BMASK)) { + ctx->base.max_insns = 2; + } + LOG_DISAS("\ntb %p idx %d hflags %04x\n", ctx->base.tb, ctx->mem_idx, ctx->hflags); } @@ -16085,17 +16079,14 @@ static void mips_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs) if (ctx->base.is_jmp != DISAS_NEXT) { return; } + /* - * Execute a branch and its delay slot as a single instruction. - * This is what GDB expects and is consistent with what the - * hardware does (e.g. if a delay slot instruction faults, the - * reported PC is the PC of the branch). + * End the TB on (most) page crossings. + * See mips_tr_init_disas_context about single-stepping a branch + * together with its delay slot. */ - if (ctx->base.singlestep_enabled && - (ctx->hflags & MIPS_HFLAG_BMASK) == 0) { - ctx->base.is_jmp = DISAS_TOO_MANY; - } - if (ctx->base.pc_next - ctx->page_start >= TARGET_PAGE_SIZE) { + if (ctx->base.pc_next - ctx->page_start >= TARGET_PAGE_SIZE + && !ctx->base.singlestep_enabled) { ctx->base.is_jmp = DISAS_TOO_MANY; } } @@ -16104,28 +16095,23 @@ static void mips_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs) { DisasContext *ctx = container_of(dcbase, DisasContext, base); - if (ctx->base.singlestep_enabled && ctx->base.is_jmp != DISAS_NORETURN) { - save_cpu_state(ctx, ctx->base.is_jmp != DISAS_EXIT); - gen_helper_raise_exception_debug(cpu_env); - } else { - switch (ctx->base.is_jmp) { - case DISAS_STOP: - gen_save_pc(ctx->base.pc_next); - tcg_gen_lookup_and_goto_ptr(); - break; - case DISAS_NEXT: - case DISAS_TOO_MANY: - save_cpu_state(ctx, 0); - gen_goto_tb(ctx, 0, ctx->base.pc_next); - break; - case DISAS_EXIT: - tcg_gen_exit_tb(NULL, 0); - break; - case DISAS_NORETURN: - break; - default: - g_assert_not_reached(); - } + switch (ctx->base.is_jmp) { + case DISAS_STOP: + gen_save_pc(ctx->base.pc_next); + tcg_gen_lookup_and_goto_ptr(); + break; + case DISAS_NEXT: + case DISAS_TOO_MANY: + save_cpu_state(ctx, 0); + gen_goto_tb(ctx, 0, ctx->base.pc_next); + break; + case DISAS_EXIT: + tcg_gen_exit_tb(NULL, 0); + break; + case DISAS_NORETURN: + break; + default: + g_assert_not_reached(); } } diff --git a/target/mips/tcg/user/meson.build b/target/mips/tcg/user/meson.build deleted file mode 100644 index 79badcd321..0000000000 --- a/target/mips/tcg/user/meson.build +++ /dev/null @@ -1,3 +0,0 @@ -mips_user_ss.add(files( - 'tlb_helper.c', -)) diff --git a/target/mips/tcg/user/tlb_helper.c b/target/mips/tcg/user/tlb_helper.c deleted file mode 100644 index 210c6d529e..0000000000 --- a/target/mips/tcg/user/tlb_helper.c +++ /dev/null @@ -1,59 +0,0 @@ -/* - * MIPS TLB (Translation lookaside buffer) helpers. - * - * Copyright (c) 2004-2005 Jocelyn Mayer - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. 
- * - * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, see . - */ -#include "qemu/osdep.h" - -#include "cpu.h" -#include "exec/exec-all.h" -#include "internal.h" - -static void raise_mmu_exception(CPUMIPSState *env, target_ulong address, - MMUAccessType access_type) -{ - CPUState *cs = env_cpu(env); - - env->error_code = 0; - if (access_type == MMU_INST_FETCH) { - env->error_code |= EXCP_INST_NOTAVAIL; - } - - /* Reference to kernel address from user mode or supervisor mode */ - /* Reference to supervisor address from user mode */ - if (access_type == MMU_DATA_STORE) { - cs->exception_index = EXCP_AdES; - } else { - cs->exception_index = EXCP_AdEL; - } - - /* Raise exception */ - if (!(env->hflags & MIPS_HFLAG_DM)) { - env->CP0_BadVAddr = address; - } -} - -bool mips_cpu_tlb_fill(CPUState *cs, vaddr address, int size, - MMUAccessType access_type, int mmu_idx, - bool probe, uintptr_t retaddr) -{ - MIPSCPU *cpu = MIPS_CPU(cs); - CPUMIPSState *env = &cpu->env; - - /* data access */ - raise_mmu_exception(env, address, access_type); - do_raise_exception_err(env, cs->exception_index, env->error_code, retaddr); -} diff --git a/target/nios2/cpu.c b/target/nios2/cpu.c index 947bb09bc1..4cade61e93 100644 --- a/target/nios2/cpu.c +++ b/target/nios2/cpu.c @@ -147,11 +147,7 @@ static void nios2_cpu_disas_set_info(CPUState *cpu, disassemble_info *info) { /* NOTE: NiosII R2 is not supported yet. */ info->mach = bfd_arch_nios2; -#ifdef TARGET_WORDS_BIGENDIAN - info->print_insn = print_insn_big_nios2; -#else - info->print_insn = print_insn_little_nios2; -#endif + info->print_insn = print_insn_nios2; } static int nios2_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n) @@ -220,9 +216,11 @@ static const struct SysemuCPUOps nios2_sysemu_ops = { static const struct TCGCPUOps nios2_tcg_ops = { .initialize = nios2_tcg_init, - .tlb_fill = nios2_cpu_tlb_fill, -#ifndef CONFIG_USER_ONLY +#ifdef CONFIG_USER_ONLY + .record_sigsegv = nios2_cpu_record_sigsegv, +#else + .tlb_fill = nios2_cpu_tlb_fill, .cpu_exec_interrupt = nios2_cpu_exec_interrupt, .do_interrupt = nios2_cpu_do_interrupt, .do_unaligned_access = nios2_cpu_do_unaligned_access, diff --git a/target/nios2/cpu.h b/target/nios2/cpu.h index a80587338a..1a69ed7a49 100644 --- a/target/nios2/cpu.h +++ b/target/nios2/cpu.h @@ -218,9 +218,15 @@ static inline int cpu_mmu_index(CPUNios2State *env, bool ifetch) MMU_SUPERVISOR_IDX; } +#ifdef CONFIG_USER_ONLY +void nios2_cpu_record_sigsegv(CPUState *cpu, vaddr addr, + MMUAccessType access_type, + bool maperr, uintptr_t ra); +#else bool nios2_cpu_tlb_fill(CPUState *cs, vaddr address, int size, MMUAccessType access_type, int mmu_idx, bool probe, uintptr_t retaddr); +#endif static inline int cpu_interrupts_enabled(CPUNios2State *env) { diff --git a/target/nios2/helper.c b/target/nios2/helper.c index 53be8398e9..e5c98650e1 100644 --- a/target/nios2/helper.c +++ b/target/nios2/helper.c @@ -38,10 +38,11 @@ void nios2_cpu_do_interrupt(CPUState *cs) env->regs[R_EA] = env->regs[R_PC] + 4; } -bool nios2_cpu_tlb_fill(CPUState *cs, vaddr address, int size, - MMUAccessType access_type, int mmu_idx, - bool probe, uintptr_t retaddr) +void nios2_cpu_record_sigsegv(CPUState *cs, vaddr addr, + MMUAccessType access_type, + bool maperr, uintptr_t retaddr) { + /* FIXME: Disentangle kuser page from linux-user sigsegv handling. 
*/ cs->exception_index = 0xaa; cpu_loop_exit_restore(cs, retaddr); } diff --git a/target/openrisc/cpu.c b/target/openrisc/cpu.c index 27cb04152f..dfbafc5236 100644 --- a/target/openrisc/cpu.c +++ b/target/openrisc/cpu.c @@ -186,9 +186,9 @@ static const struct SysemuCPUOps openrisc_sysemu_ops = { static const struct TCGCPUOps openrisc_tcg_ops = { .initialize = openrisc_translate_init, - .tlb_fill = openrisc_cpu_tlb_fill, #ifndef CONFIG_USER_ONLY + .tlb_fill = openrisc_cpu_tlb_fill, .cpu_exec_interrupt = openrisc_cpu_exec_interrupt, .do_interrupt = openrisc_cpu_do_interrupt, #endif /* !CONFIG_USER_ONLY */ diff --git a/target/openrisc/cpu.h b/target/openrisc/cpu.h index 187a4a114e..ee069b080c 100644 --- a/target/openrisc/cpu.h +++ b/target/openrisc/cpu.h @@ -317,14 +317,15 @@ hwaddr openrisc_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr); int openrisc_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg); int openrisc_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg); void openrisc_translate_init(void); -bool openrisc_cpu_tlb_fill(CPUState *cs, vaddr address, int size, - MMUAccessType access_type, int mmu_idx, - bool probe, uintptr_t retaddr); int print_insn_or1k(bfd_vma addr, disassemble_info *info); #define cpu_list cpu_openrisc_list #ifndef CONFIG_USER_ONLY +bool openrisc_cpu_tlb_fill(CPUState *cs, vaddr address, int size, + MMUAccessType access_type, int mmu_idx, + bool probe, uintptr_t retaddr); + extern const VMStateDescription vmstate_openrisc_cpu; void openrisc_cpu_do_interrupt(CPUState *cpu); diff --git a/target/openrisc/meson.build b/target/openrisc/meson.build index e445dec4a0..84322086ec 100644 --- a/target/openrisc/meson.build +++ b/target/openrisc/meson.build @@ -10,7 +10,6 @@ openrisc_ss.add(files( 'fpu_helper.c', 'gdbstub.c', 'interrupt_helper.c', - 'mmu.c', 'sys_helper.c', 'translate.c', )) @@ -19,6 +18,7 @@ openrisc_softmmu_ss = ss.source_set() openrisc_softmmu_ss.add(files( 'interrupt.c', 'machine.c', + 'mmu.c', )) target_arch += {'openrisc': openrisc_ss} diff --git a/target/openrisc/mmu.c b/target/openrisc/mmu.c index 94df8c7bef..e561ef245b 100644 --- a/target/openrisc/mmu.c +++ b/target/openrisc/mmu.c @@ -23,11 +23,8 @@ #include "exec/exec-all.h" #include "exec/gdbstub.h" #include "qemu/host-utils.h" -#ifndef CONFIG_USER_ONLY #include "hw/loader.h" -#endif -#ifndef CONFIG_USER_ONLY static inline void get_phys_nommu(hwaddr *phys_addr, int *prot, target_ulong address) { @@ -94,7 +91,6 @@ static int get_phys_mmu(OpenRISCCPU *cpu, hwaddr *phys_addr, int *prot, return need & PAGE_EXEC ? 
EXCP_ITLBMISS : EXCP_DTLBMISS; } } -#endif static void raise_mmu_exception(OpenRISCCPU *cpu, target_ulong address, int exception) @@ -112,8 +108,6 @@ bool openrisc_cpu_tlb_fill(CPUState *cs, vaddr addr, int size, { OpenRISCCPU *cpu = OPENRISC_CPU(cs); int excp = EXCP_DPF; - -#ifndef CONFIG_USER_ONLY int prot; hwaddr phys_addr; @@ -138,13 +132,11 @@ bool openrisc_cpu_tlb_fill(CPUState *cs, vaddr addr, int size, if (probe) { return false; } -#endif raise_mmu_exception(cpu, addr, excp); cpu_loop_exit_restore(cs, retaddr); } -#ifndef CONFIG_USER_ONLY hwaddr openrisc_cpu_get_phys_page_debug(CPUState *cs, vaddr addr) { OpenRISCCPU *cpu = OPENRISC_CPU(cs); @@ -177,4 +169,3 @@ hwaddr openrisc_cpu_get_phys_page_debug(CPUState *cs, vaddr addr) return phys_addr; } } -#endif diff --git a/target/openrisc/translate.c b/target/openrisc/translate.c index 5f3d430245..ca79e609da 100644 --- a/target/openrisc/translate.c +++ b/target/openrisc/translate.c @@ -1659,11 +1659,7 @@ static void openrisc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs) /* The jump destination is indirect/computed; use jmp_pc. */ tcg_gen_mov_tl(cpu_pc, jmp_pc); tcg_gen_discard_tl(jmp_pc); - if (unlikely(dc->base.singlestep_enabled)) { - gen_exception(dc, EXCP_DEBUG); - } else { - tcg_gen_lookup_and_goto_ptr(); - } + tcg_gen_lookup_and_goto_ptr(); break; } /* The jump destination is direct; use jmp_pc_imm. @@ -1680,19 +1676,11 @@ static void openrisc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs) break; } tcg_gen_movi_tl(cpu_pc, jmp_dest); - if (unlikely(dc->base.singlestep_enabled)) { - gen_exception(dc, EXCP_DEBUG); - } else { - tcg_gen_lookup_and_goto_ptr(); - } + tcg_gen_lookup_and_goto_ptr(); break; case DISAS_EXIT: - if (unlikely(dc->base.singlestep_enabled)) { - gen_exception(dc, EXCP_DEBUG); - } else { - tcg_gen_exit_tb(NULL, 0); - } + tcg_gen_exit_tb(NULL, 0); break; default: g_assert_not_reached(); diff --git a/target/ppc/cpu.c b/target/ppc/cpu.c index 7ad9bd6044..f933d9f2bd 100644 --- a/target/ppc/cpu.c +++ b/target/ppc/cpu.c @@ -27,7 +27,7 @@ #include "helper_regs.h" #include "sysemu/tcg.h" -target_ulong cpu_read_xer(CPUPPCState *env) +target_ulong cpu_read_xer(const CPUPPCState *env) { if (is_isa300(env)) { return env->xer | (env->so << XER_SO) | diff --git a/target/ppc/cpu.h b/target/ppc/cpu.h index 01d3773bc7..e946da5f3a 100644 --- a/target/ppc/cpu.h +++ b/target/ppc/cpu.h @@ -314,6 +314,7 @@ typedef struct ppc_v3_pate_t { #define MSR_AP 23 /* Access privilege state on 602 hflags */ #define MSR_VSX 23 /* Vector Scalar Extension (ISA 2.06 and later) x hflags */ #define MSR_SA 22 /* Supervisor access mode on 602 hflags */ +#define MSR_S 22 /* Secure state */ #define MSR_KEY 19 /* key bit on 603e */ #define MSR_POW 18 /* Power management */ #define MSR_TGPR 17 /* TGPR usage on 602/603 x */ @@ -342,6 +343,26 @@ typedef struct ppc_v3_pate_t { #define MSR_RI 1 /* Recoverable interrupt 1 */ #define MSR_LE 0 /* Little-endian mode 1 hflags */ +/* PMU bits */ +#define MMCR0_FC PPC_BIT(32) /* Freeze Counters */ +#define MMCR0_PMAO PPC_BIT(56) /* Perf Monitor Alert Ocurred */ +#define MMCR0_PMAE PPC_BIT(37) /* Perf Monitor Alert Enable */ +#define MMCR0_EBE PPC_BIT(43) /* Perf Monitor EBB Enable */ +#define MMCR0_FCECE PPC_BIT(38) /* FC on Enabled Cond or Event */ +#define MMCR0_PMCC0 PPC_BIT(44) /* PMC Control bit 0 */ +#define MMCR0_PMCC1 PPC_BIT(45) /* PMC Control bit 1 */ +/* MMCR0 userspace r/w mask */ +#define MMCR0_UREG_MASK (MMCR0_FC | MMCR0_PMAO | MMCR0_PMAE) +/* MMCR2 userspace r/w mask */ +#define MMCR2_FC1P0 
PPC_BIT(1) /* MMCR2 FCnP0 for PMC1 */ +#define MMCR2_FC2P0 PPC_BIT(10) /* MMCR2 FCnP0 for PMC2 */ +#define MMCR2_FC3P0 PPC_BIT(19) /* MMCR2 FCnP0 for PMC3 */ +#define MMCR2_FC4P0 PPC_BIT(28) /* MMCR2 FCnP0 for PMC4 */ +#define MMCR2_FC5P0 PPC_BIT(37) /* MMCR2 FCnP0 for PMC5 */ +#define MMCR2_FC6P0 PPC_BIT(46) /* MMCR2 FCnP0 for PMC6 */ +#define MMCR2_UREG_MASK (MMCR2_FC1P0 | MMCR2_FC2P0 | MMCR2_FC3P0 | \ + MMCR2_FC4P0 | MMCR2_FC5P0 | MMCR2_FC6P0) + /* LPCR bits */ #define LPCR_VPM0 PPC_BIT(0) #define LPCR_VPM1 PPC_BIT(1) @@ -600,12 +621,15 @@ enum { HFLAGS_64 = 2, /* computed from MSR_CE and MSR_SF */ HFLAGS_GTSE = 3, /* computed from SPR_LPCR[GTSE] */ HFLAGS_DR = 4, /* MSR_DR */ + HFLAGS_HR = 5, /* computed from SPR_LPCR[HR] */ HFLAGS_SPE = 6, /* from MSR_SPE if cpu has SPE; avoid overlap w/ MSR_VR */ HFLAGS_TM = 8, /* computed from MSR_TM */ HFLAGS_BE = 9, /* MSR_BE -- from elsewhere on embedded ppc */ HFLAGS_SE = 10, /* MSR_SE -- from elsewhere on embedded ppc */ HFLAGS_FP = 13, /* MSR_FP */ HFLAGS_PR = 14, /* MSR_PR */ + HFLAGS_PMCC0 = 15, /* MMCR0 PMCC bit 0 */ + HFLAGS_PMCC1 = 16, /* MMCR0 PMCC bit 1 */ HFLAGS_VSX = 23, /* MSR_VSX if cpu has VSX */ HFLAGS_VR = 25, /* MSR_VR if cpu has VRE */ @@ -1278,9 +1302,6 @@ extern const VMStateDescription vmstate_ppc_cpu; /*****************************************************************************/ void ppc_translate_init(void); -bool ppc_cpu_tlb_fill(CPUState *cs, vaddr address, int size, - MMUAccessType access_type, int mmu_idx, - bool probe, uintptr_t retaddr); #if !defined(CONFIG_USER_ONLY) void ppc_store_sdr1(CPUPPCState *env, target_ulong value); @@ -2411,7 +2432,7 @@ enum { /*****************************************************************************/ #define is_isa300(ctx) (!!(ctx->insns_flags2 & PPC2_ISA300)) -target_ulong cpu_read_xer(CPUPPCState *env); +target_ulong cpu_read_xer(const CPUPPCState *env); void cpu_write_xer(CPUPPCState *env, target_ulong xer); /* diff --git a/target/ppc/cpu_init.c b/target/ppc/cpu_init.c index 6aad01d1d3..6695985e9b 100644 --- a/target/ppc/cpu_init.c +++ b/target/ppc/cpu_init.c @@ -6867,7 +6867,7 @@ static void register_book3s_pmu_sup_sprs(CPUPPCState *env) static void register_book3s_pmu_user_sprs(CPUPPCState *env) { spr_register(env, SPR_POWER_UMMCR0, "UMMCR0", - &spr_read_ureg, SPR_NOACCESS, + &spr_read_MMCR0_ureg, &spr_write_MMCR0_ureg, &spr_read_ureg, &spr_write_ureg, 0x00000000); spr_register(env, SPR_POWER_UMMCR1, "UMMCR1", @@ -6879,27 +6879,27 @@ static void register_book3s_pmu_user_sprs(CPUPPCState *env) &spr_read_ureg, &spr_write_ureg, 0x00000000); spr_register(env, SPR_POWER_UPMC1, "UPMC1", - &spr_read_ureg, SPR_NOACCESS, + &spr_read_PMC14_ureg, &spr_write_PMC14_ureg, &spr_read_ureg, &spr_write_ureg, 0x00000000); spr_register(env, SPR_POWER_UPMC2, "UPMC2", - &spr_read_ureg, SPR_NOACCESS, + &spr_read_PMC14_ureg, &spr_write_PMC14_ureg, &spr_read_ureg, &spr_write_ureg, 0x00000000); spr_register(env, SPR_POWER_UPMC3, "UPMC3", - &spr_read_ureg, SPR_NOACCESS, + &spr_read_PMC14_ureg, &spr_write_PMC14_ureg, &spr_read_ureg, &spr_write_ureg, 0x00000000); spr_register(env, SPR_POWER_UPMC4, "UPMC4", - &spr_read_ureg, SPR_NOACCESS, + &spr_read_PMC14_ureg, &spr_write_PMC14_ureg, &spr_read_ureg, &spr_write_ureg, 0x00000000); spr_register(env, SPR_POWER_UPMC5, "UPMC5", - &spr_read_ureg, SPR_NOACCESS, + &spr_read_PMC56_ureg, &spr_write_PMC56_ureg, &spr_read_ureg, &spr_write_ureg, 0x00000000); spr_register(env, SPR_POWER_UPMC6, "UPMC6", - &spr_read_ureg, SPR_NOACCESS, + &spr_read_PMC56_ureg, 
&spr_write_PMC56_ureg, &spr_read_ureg, &spr_write_ureg, 0x00000000); spr_register(env, SPR_POWER_USIAR, "USIAR", @@ -6975,7 +6975,7 @@ static void register_power8_pmu_sup_sprs(CPUPPCState *env) static void register_power8_pmu_user_sprs(CPUPPCState *env) { spr_register(env, SPR_POWER_UMMCR2, "UMMCR2", - &spr_read_ureg, SPR_NOACCESS, + &spr_read_MMCR2_ureg, &spr_write_MMCR2_ureg, &spr_read_ureg, &spr_write_ureg, 0x00000000); spr_register(env, SPR_POWER_USIER, "USIER", @@ -8734,7 +8734,7 @@ void ppc_cpu_list(void) #ifdef CONFIG_KVM qemu_printf("\n"); - qemu_printf("PowerPC %-16s\n", "host"); + qemu_printf("PowerPC %s\n", "host"); #endif } @@ -9014,9 +9014,11 @@ static const struct SysemuCPUOps ppc_sysemu_ops = { static const struct TCGCPUOps ppc_tcg_ops = { .initialize = ppc_translate_init, - .tlb_fill = ppc_cpu_tlb_fill, -#ifndef CONFIG_USER_ONLY +#ifdef CONFIG_USER_ONLY + .record_sigsegv = ppc_cpu_record_sigsegv, +#else + .tlb_fill = ppc_cpu_tlb_fill, .cpu_exec_interrupt = ppc_cpu_exec_interrupt, .do_interrupt = ppc_cpu_do_interrupt, .cpu_exec_enter = ppc_cpu_exec_enter, diff --git a/target/ppc/dfp_helper.c b/target/ppc/dfp_helper.c index 07341a69f5..0d01ac3de0 100644 --- a/target/ppc/dfp_helper.c +++ b/target/ppc/dfp_helper.c @@ -51,6 +51,11 @@ static void set_dfp128(ppc_fprp_t *dfp, ppc_vsr_t *src) dfp[1].VsrD(0) = src->VsrD(1); } +static void set_dfp128_to_avr(ppc_avr_t *dst, ppc_vsr_t *src) +{ + *dst = *src; +} + struct PPC_DFP { CPUPPCState *env; ppc_vsr_t vt, va, vb; @@ -440,8 +445,8 @@ static void ADD_PPs(struct PPC_DFP *dfp) dfp_check_for_VXISI_add(dfp); } -DFP_HELPER_TAB(dadd, decNumberAdd, ADD_PPs, 64) -DFP_HELPER_TAB(daddq, decNumberAdd, ADD_PPs, 128) +DFP_HELPER_TAB(DADD, decNumberAdd, ADD_PPs, 64) +DFP_HELPER_TAB(DADDQ, decNumberAdd, ADD_PPs, 128) static void SUB_PPs(struct PPC_DFP *dfp) { @@ -453,8 +458,8 @@ static void SUB_PPs(struct PPC_DFP *dfp) dfp_check_for_VXISI_subtract(dfp); } -DFP_HELPER_TAB(dsub, decNumberSubtract, SUB_PPs, 64) -DFP_HELPER_TAB(dsubq, decNumberSubtract, SUB_PPs, 128) +DFP_HELPER_TAB(DSUB, decNumberSubtract, SUB_PPs, 64) +DFP_HELPER_TAB(DSUBQ, decNumberSubtract, SUB_PPs, 128) static void MUL_PPs(struct PPC_DFP *dfp) { @@ -466,8 +471,8 @@ static void MUL_PPs(struct PPC_DFP *dfp) dfp_check_for_VXIMZ(dfp); } -DFP_HELPER_TAB(dmul, decNumberMultiply, MUL_PPs, 64) -DFP_HELPER_TAB(dmulq, decNumberMultiply, MUL_PPs, 128) +DFP_HELPER_TAB(DMUL, decNumberMultiply, MUL_PPs, 64) +DFP_HELPER_TAB(DMULQ, decNumberMultiply, MUL_PPs, 128) static void DIV_PPs(struct PPC_DFP *dfp) { @@ -481,8 +486,8 @@ static void DIV_PPs(struct PPC_DFP *dfp) dfp_check_for_VXIDI(dfp); } -DFP_HELPER_TAB(ddiv, decNumberDivide, DIV_PPs, 64) -DFP_HELPER_TAB(ddivq, decNumberDivide, DIV_PPs, 128) +DFP_HELPER_TAB(DDIV, decNumberDivide, DIV_PPs, 64) +DFP_HELPER_TAB(DDIVQ, decNumberDivide, DIV_PPs, 128) #define DFP_HELPER_BF_AB(op, dnop, postprocs, size) \ uint32_t helper_##op(CPUPPCState *env, ppc_fprp_t *a, ppc_fprp_t *b) \ @@ -502,8 +507,8 @@ static void CMPU_PPs(struct PPC_DFP *dfp) dfp_check_for_VXSNAN(dfp); } -DFP_HELPER_BF_AB(dcmpu, decNumberCompare, CMPU_PPs, 64) -DFP_HELPER_BF_AB(dcmpuq, decNumberCompare, CMPU_PPs, 128) +DFP_HELPER_BF_AB(DCMPU, decNumberCompare, CMPU_PPs, 64) +DFP_HELPER_BF_AB(DCMPUQ, decNumberCompare, CMPU_PPs, 128) static void CMPO_PPs(struct PPC_DFP *dfp) { @@ -513,8 +518,8 @@ static void CMPO_PPs(struct PPC_DFP *dfp) dfp_check_for_VXVC(dfp); } -DFP_HELPER_BF_AB(dcmpo, decNumberCompare, CMPO_PPs, 64) -DFP_HELPER_BF_AB(dcmpoq, decNumberCompare, CMPO_PPs, 128) 
+DFP_HELPER_BF_AB(DCMPO, decNumberCompare, CMPO_PPs, 64) +DFP_HELPER_BF_AB(DCMPOQ, decNumberCompare, CMPO_PPs, 128) #define DFP_HELPER_TSTDC(op, size) \ uint32_t helper_##op(CPUPPCState *env, ppc_fprp_t *a, uint32_t dcm) \ @@ -541,8 +546,8 @@ uint32_t helper_##op(CPUPPCState *env, ppc_fprp_t *a, uint32_t dcm) \ return dfp.crbf; \ } -DFP_HELPER_TSTDC(dtstdc, 64) -DFP_HELPER_TSTDC(dtstdcq, 128) +DFP_HELPER_TSTDC(DTSTDC, 64) +DFP_HELPER_TSTDC(DTSTDCQ, 128) #define DFP_HELPER_TSTDG(op, size) \ uint32_t helper_##op(CPUPPCState *env, ppc_fprp_t *a, uint32_t dcm) \ @@ -596,8 +601,8 @@ uint32_t helper_##op(CPUPPCState *env, ppc_fprp_t *a, uint32_t dcm) \ return dfp.crbf; \ } -DFP_HELPER_TSTDG(dtstdg, 64) -DFP_HELPER_TSTDG(dtstdgq, 128) +DFP_HELPER_TSTDG(DTSTDG, 64) +DFP_HELPER_TSTDG(DTSTDGQ, 128) #define DFP_HELPER_TSTEX(op, size) \ uint32_t helper_##op(CPUPPCState *env, ppc_fprp_t *a, ppc_fprp_t *b) \ @@ -628,8 +633,8 @@ uint32_t helper_##op(CPUPPCState *env, ppc_fprp_t *a, ppc_fprp_t *b) \ return dfp.crbf; \ } -DFP_HELPER_TSTEX(dtstex, 64) -DFP_HELPER_TSTEX(dtstexq, 128) +DFP_HELPER_TSTEX(DTSTEX, 64) +DFP_HELPER_TSTEX(DTSTEXQ, 128) #define DFP_HELPER_TSTSF(op, size) \ uint32_t helper_##op(CPUPPCState *env, ppc_fprp_t *a, ppc_fprp_t *b) \ @@ -665,8 +670,8 @@ uint32_t helper_##op(CPUPPCState *env, ppc_fprp_t *a, ppc_fprp_t *b) \ return dfp.crbf; \ } -DFP_HELPER_TSTSF(dtstsf, 64) -DFP_HELPER_TSTSF(dtstsfq, 128) +DFP_HELPER_TSTSF(DTSTSF, 64) +DFP_HELPER_TSTSF(DTSTSFQ, 128) #define DFP_HELPER_TSTSFI(op, size) \ uint32_t helper_##op(CPUPPCState *env, uint32_t a, ppc_fprp_t *b) \ @@ -700,8 +705,8 @@ uint32_t helper_##op(CPUPPCState *env, uint32_t a, ppc_fprp_t *b) \ return dfp.crbf; \ } -DFP_HELPER_TSTSFI(dtstsfi, 64) -DFP_HELPER_TSTSFI(dtstsfiq, 128) +DFP_HELPER_TSTSFI(DTSTSFI, 64) +DFP_HELPER_TSTSFI(DTSTSFIQ, 128) static void QUA_PPs(struct PPC_DFP *dfp) { @@ -746,8 +751,8 @@ void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b, \ set_dfp##size(t, &dfp.vt); \ } -DFP_HELPER_QUAI(dquai, 64) -DFP_HELPER_QUAI(dquaiq, 128) +DFP_HELPER_QUAI(DQUAI, 64) +DFP_HELPER_QUAI(DQUAIQ, 128) #define DFP_HELPER_QUA(op, size) \ void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *a, \ @@ -764,8 +769,8 @@ void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *a, \ set_dfp##size(t, &dfp.vt); \ } -DFP_HELPER_QUA(dqua, 64) -DFP_HELPER_QUA(dquaq, 128) +DFP_HELPER_QUA(DQUA, 64) +DFP_HELPER_QUA(DQUAQ, 128) static void _dfp_reround(uint8_t rmc, int32_t ref_sig, int32_t xmax, struct PPC_DFP *dfp) @@ -842,8 +847,8 @@ void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *a, \ set_dfp##size(t, &dfp.vt); \ } -DFP_HELPER_RRND(drrnd, 64) -DFP_HELPER_RRND(drrndq, 128) +DFP_HELPER_RRND(DRRND, 64) +DFP_HELPER_RRND(DRRNDQ, 128) #define DFP_HELPER_RINT(op, postprocs, size) \ void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b, \ @@ -868,8 +873,8 @@ static void RINTX_PPs(struct PPC_DFP *dfp) dfp_check_for_VXSNAN(dfp); } -DFP_HELPER_RINT(drintx, RINTX_PPs, 64) -DFP_HELPER_RINT(drintxq, RINTX_PPs, 128) +DFP_HELPER_RINT(DRINTX, RINTX_PPs, 64) +DFP_HELPER_RINT(DRINTXQ, RINTX_PPs, 128) static void RINTN_PPs(struct PPC_DFP *dfp) { @@ -877,10 +882,10 @@ static void RINTN_PPs(struct PPC_DFP *dfp) dfp_check_for_VXSNAN(dfp); } -DFP_HELPER_RINT(drintn, RINTN_PPs, 64) -DFP_HELPER_RINT(drintnq, RINTN_PPs, 128) +DFP_HELPER_RINT(DRINTN, RINTN_PPs, 64) +DFP_HELPER_RINT(DRINTNQ, RINTN_PPs, 128) -void helper_dctdp(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b) +void helper_DCTDP(CPUPPCState *env, ppc_fprp_t 
*t, ppc_fprp_t *b) { struct PPC_DFP dfp; ppc_vsr_t vb; @@ -896,7 +901,7 @@ void helper_dctdp(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b) dfp_set_FPRF_from_FRT(&dfp); } -void helper_dctqpq(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b) +void helper_DCTQPQ(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b) { struct PPC_DFP dfp; ppc_vsr_t vb; @@ -911,7 +916,7 @@ void helper_dctqpq(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b) set_dfp128(t, &dfp.vt); } -void helper_drsp(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b) +void helper_DRSP(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b) { struct PPC_DFP dfp; uint32_t t_short = 0; @@ -929,7 +934,7 @@ void helper_drsp(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b) set_dfp64(t, &vt); } -void helper_drdpq(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b) +void helper_DRDPQ(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b) { struct PPC_DFP dfp; dfp_prepare_decimal128(&dfp, 0, b, env); @@ -967,8 +972,20 @@ static void CFFIX_PPs(struct PPC_DFP *dfp) dfp_check_for_XX(dfp); } -DFP_HELPER_CFFIX(dcffix, 64) -DFP_HELPER_CFFIX(dcffixq, 128) +DFP_HELPER_CFFIX(DCFFIX, 64) +DFP_HELPER_CFFIX(DCFFIXQ, 128) + +void helper_DCFFIXQQ(CPUPPCState *env, ppc_fprp_t *t, ppc_avr_t *b) +{ + struct PPC_DFP dfp; + + dfp_prepare_decimal128(&dfp, NULL, NULL, env); + decNumberFromInt128(&dfp.t, (uint64_t)b->VsrD(1), (int64_t)b->VsrD(0)); + dfp_finalize_decimal128(&dfp); + CFFIX_PPs(&dfp); + + set_dfp128(t, &dfp.vt); +} #define DFP_HELPER_CTFIX(op, size) \ void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b) \ @@ -1005,8 +1022,55 @@ void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b) \ set_dfp64(t, &dfp.vt); \ } -DFP_HELPER_CTFIX(dctfix, 64) -DFP_HELPER_CTFIX(dctfixq, 128) +DFP_HELPER_CTFIX(DCTFIX, 64) +DFP_HELPER_CTFIX(DCTFIXQ, 128) + +void helper_DCTFIXQQ(CPUPPCState *env, ppc_avr_t *t, ppc_fprp_t *b) +{ + struct PPC_DFP dfp; + dfp_prepare_decimal128(&dfp, 0, b, env); + + if (unlikely(decNumberIsSpecial(&dfp.b))) { + uint64_t invalid_flags = FP_VX | FP_VXCVI; + if (decNumberIsInfinite(&dfp.b)) { + if (decNumberIsNegative(&dfp.b)) { + dfp.vt.VsrD(0) = INT64_MIN; + dfp.vt.VsrD(1) = 0; + } else { + dfp.vt.VsrD(0) = INT64_MAX; + dfp.vt.VsrD(1) = UINT64_MAX; + } + } else { /* NaN */ + dfp.vt.VsrD(0) = INT64_MIN; + dfp.vt.VsrD(1) = 0; + if (decNumberIsSNaN(&dfp.b)) { + invalid_flags |= FP_VXSNAN; + } + } + dfp_set_FPSCR_flag(&dfp, invalid_flags, FP_VE); + } else if (unlikely(decNumberIsZero(&dfp.b))) { + dfp.vt.VsrD(0) = 0; + dfp.vt.VsrD(1) = 0; + } else { + decNumberToIntegralExact(&dfp.b, &dfp.b, &dfp.context); + decNumberIntegralToInt128(&dfp.b, &dfp.context, + &dfp.vt.VsrD(1), &dfp.vt.VsrD(0)); + if (decContextTestStatus(&dfp.context, DEC_Invalid_operation)) { + if (decNumberIsNegative(&dfp.b)) { + dfp.vt.VsrD(0) = INT64_MIN; + dfp.vt.VsrD(1) = 0; + } else { + dfp.vt.VsrD(0) = INT64_MAX; + dfp.vt.VsrD(1) = UINT64_MAX; + } + dfp_set_FPSCR_flag(&dfp, FP_VX | FP_VXCVI, FP_VE); + } else { + dfp_check_for_XX(&dfp); + } + } + + set_dfp128_to_avr(t, &dfp.vt); +} static inline void dfp_set_bcd_digit_64(ppc_vsr_t *t, uint8_t digit, unsigned n) @@ -1067,8 +1131,8 @@ void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b, \ set_dfp##size(t, &dfp.vt); \ } -DFP_HELPER_DEDPD(ddedpd, 64) -DFP_HELPER_DEDPD(ddedpdq, 128) +DFP_HELPER_DEDPD(DDEDPD, 64) +DFP_HELPER_DEDPD(DDEDPDQ, 128) static inline uint8_t dfp_get_bcd_digit_64(ppc_vsr_t *t, unsigned n) { @@ -1135,8 +1199,8 @@ void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b, \ set_dfp##size(t, 
&dfp.vt); \ } -DFP_HELPER_ENBCD(denbcd, 64) -DFP_HELPER_ENBCD(denbcdq, 128) +DFP_HELPER_ENBCD(DENBCD, 64) +DFP_HELPER_ENBCD(DENBCDQ, 128) #define DFP_HELPER_XEX(op, size) \ void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b) \ @@ -1169,8 +1233,8 @@ void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b) \ } \ } -DFP_HELPER_XEX(dxex, 64) -DFP_HELPER_XEX(dxexq, 128) +DFP_HELPER_XEX(DXEX, 64) +DFP_HELPER_XEX(DXEXQ, 128) static void dfp_set_raw_exp_64(ppc_vsr_t *t, uint64_t raw) { @@ -1235,8 +1299,8 @@ void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *a, \ set_dfp##size(t, &dfp.vt); \ } -DFP_HELPER_IEX(diex, 64) -DFP_HELPER_IEX(diexq, 128) +DFP_HELPER_IEX(DIEX, 64) +DFP_HELPER_IEX(DIEXQ, 128) static void dfp_clear_lmd_from_g5msb(uint64_t *t) { @@ -1323,7 +1387,7 @@ void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *a, \ set_dfp##size(t, &dfp.vt); \ } -DFP_HELPER_SHIFT(dscli, 64, 1) -DFP_HELPER_SHIFT(dscliq, 128, 1) -DFP_HELPER_SHIFT(dscri, 64, 0) -DFP_HELPER_SHIFT(dscriq, 128, 0) +DFP_HELPER_SHIFT(DSCLI, 64, 1) +DFP_HELPER_SHIFT(DSCLIQ, 128, 1) +DFP_HELPER_SHIFT(DSCRI, 64, 0) +DFP_HELPER_SHIFT(DSCRIQ, 128, 0) diff --git a/target/ppc/excp_helper.c b/target/ppc/excp_helper.c index d7e32ee107..17607adbe4 100644 --- a/target/ppc/excp_helper.c +++ b/target/ppc/excp_helper.c @@ -23,20 +23,14 @@ #include "internal.h" #include "helper_regs.h" +#include "trace.h" + #ifdef CONFIG_TCG #include "exec/helper-proto.h" #include "exec/cpu_ldst.h" #endif -/* #define DEBUG_OP */ /* #define DEBUG_SOFTWARE_TLB */ -/* #define DEBUG_EXCEPTIONS */ - -#ifdef DEBUG_EXCEPTIONS -# define LOG_EXCP(...) qemu_log(__VA_ARGS__) -#else -# define LOG_EXCP(...) do { } while (0) -#endif /*****************************************************************************/ /* Exception processing */ @@ -414,12 +408,10 @@ static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp) } break; case POWERPC_EXCP_DSI: /* Data storage exception */ - LOG_EXCP("DSI exception: DSISR=" TARGET_FMT_lx" DAR=" TARGET_FMT_lx - "\n", env->spr[SPR_DSISR], env->spr[SPR_DAR]); + trace_ppc_excp_dsi(env->spr[SPR_DSISR], env->spr[SPR_DAR]); break; case POWERPC_EXCP_ISI: /* Instruction storage exception */ - LOG_EXCP("ISI exception: msr=" TARGET_FMT_lx ", nip=" TARGET_FMT_lx - "\n", msr, env->nip); + trace_ppc_excp_isi(msr, env->nip); msr |= env->error_code; break; case POWERPC_EXCP_EXTERNAL: /* External input */ @@ -462,19 +454,21 @@ static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp) break; } case POWERPC_EXCP_ALIGN: /* Alignment exception */ - /* Get rS/rD and rA from faulting opcode */ /* - * Note: the opcode fields will not be set properly for a - * direct store load/store, but nobody cares as nobody - * actually uses direct store segments. + * Get rS/rD and rA from faulting opcode. + * Note: We will only invoke ALIGN for atomic operations, + * so all instructions are X-form. 
*/ - env->spr[SPR_DSISR] |= (env->error_code & 0x03FF0000) >> 16; + { + uint32_t insn = cpu_ldl_code(env, env->nip); + env->spr[SPR_DSISR] |= (insn & 0x03FF0000) >> 16; + } break; case POWERPC_EXCP_PROGRAM: /* Program exception */ switch (env->error_code & ~0xF) { case POWERPC_EXCP_FP: if ((msr_fe0 == 0 && msr_fe1 == 0) || msr_fp == 0) { - LOG_EXCP("Ignore floating point exception\n"); + trace_ppc_excp_fp_ignore(); cs->exception_index = POWERPC_EXCP_NONE; env->error_code = 0; return; @@ -489,7 +483,7 @@ static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp) env->spr[SPR_BOOKE_ESR] = ESR_FP; break; case POWERPC_EXCP_INVAL: - LOG_EXCP("Invalid instruction at " TARGET_FMT_lx "\n", env->nip); + trace_ppc_excp_inval(env->nip); msr |= 0x00080000; env->spr[SPR_BOOKE_ESR] = ESR_PIL; break; @@ -547,10 +541,10 @@ static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp) break; case POWERPC_EXCP_FIT: /* Fixed-interval timer interrupt */ /* FIT on 4xx */ - LOG_EXCP("FIT exception\n"); + trace_ppc_excp_print("FIT"); break; case POWERPC_EXCP_WDT: /* Watchdog timer interrupt */ - LOG_EXCP("WDT exception\n"); + trace_ppc_excp_print("WDT"); switch (excp_model) { case POWERPC_EXCP_BOOKE: srr0 = SPR_BOOKE_CSRR0; @@ -657,7 +651,7 @@ static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp) #endif break; case POWERPC_EXCP_PIT: /* Programmable interval timer interrupt */ - LOG_EXCP("PIT exception\n"); + trace_ppc_excp_print("PIT"); break; case POWERPC_EXCP_IO: /* IO error exception */ /* XXX: TODO */ @@ -1115,14 +1109,6 @@ bool ppc_cpu_exec_interrupt(CPUState *cs, int interrupt_request) #endif /* !CONFIG_USER_ONLY */ -#if defined(DEBUG_OP) -static void cpu_dump_rfi(target_ulong RA, target_ulong msr) -{ - qemu_log("Return from exception at " TARGET_FMT_lx " with flags " - TARGET_FMT_lx "\n", RA, msr); -} -#endif - /*****************************************************************************/ /* Exceptions processing helpers */ @@ -1221,9 +1207,7 @@ static inline void do_rfi(CPUPPCState *env, target_ulong nip, target_ulong msr) /* XXX: beware: this is false if VLE is supported */ env->nip = nip & ~((target_ulong)0x00000003); hreg_store_msr(env, msr, 1); -#if defined(DEBUG_OP) - cpu_dump_rfi(env->nip, env->msr); -#endif + trace_ppc_excp_rfi(env->nip, env->msr); /* * No need to raise an exception here, as rfi is always the last * insn of a TB @@ -1470,24 +1454,31 @@ void helper_book3s_msgsndp(CPUPPCState *env, target_ulong rb) book3s_msgsnd_common(pir, PPC_INTERRUPT_DOORBELL); } -#endif -#endif /* CONFIG_TCG */ -#endif +#endif /* TARGET_PPC64 */ -#ifdef CONFIG_TCG void ppc_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr, MMUAccessType access_type, int mmu_idx, uintptr_t retaddr) { CPUPPCState *env = cs->env_ptr; - uint32_t insn; - /* Restore state and reload the insn we executed, for filling in DSISR. 
*/ - cpu_restore_state(cs, retaddr, true); - insn = cpu_ldl_code(env, env->nip); + switch (env->mmu_model) { + case POWERPC_MMU_SOFT_4xx: + case POWERPC_MMU_SOFT_4xx_Z: + env->spr[SPR_40x_DEAR] = vaddr; + break; + case POWERPC_MMU_BOOKE: + case POWERPC_MMU_BOOKE206: + env->spr[SPR_BOOKE_DEAR] = vaddr; + break; + default: + env->spr[SPR_DAR] = vaddr; + break; + } cs->exception_index = POWERPC_EXCP_ALIGN; - env->error_code = insn & 0x03FF0000; - cpu_loop_exit(cs); + env->error_code = 0; + cpu_loop_exit_restore(cs, retaddr); } -#endif +#endif /* CONFIG_TCG */ +#endif /* !CONFIG_USER_ONLY */ diff --git a/target/ppc/gdbstub.c b/target/ppc/gdbstub.c index 1808a150e4..105c2f7dd1 100644 --- a/target/ppc/gdbstub.c +++ b/target/ppc/gdbstub.c @@ -159,7 +159,7 @@ int ppc_cpu_gdb_read_register(CPUState *cs, GByteArray *buf, int n) gdb_get_regl(buf, env->ctr); break; case 69: - gdb_get_reg32(buf, env->xer); + gdb_get_reg32(buf, cpu_read_xer(env)); break; case 70: gdb_get_reg32(buf, env->fpscr); @@ -217,7 +217,7 @@ int ppc_cpu_gdb_read_register_apple(CPUState *cs, GByteArray *buf, int n) gdb_get_reg64(buf, env->ctr); break; case 69 + 32: - gdb_get_reg32(buf, env->xer); + gdb_get_reg32(buf, cpu_read_xer(env)); break; case 70 + 32: gdb_get_reg64(buf, env->fpscr); @@ -269,7 +269,7 @@ int ppc_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n) env->ctr = ldtul_p(mem_buf); break; case 69: - env->xer = ldl_p(mem_buf); + cpu_write_xer(env, ldl_p(mem_buf)); break; case 70: /* fpscr */ @@ -319,7 +319,7 @@ int ppc_cpu_gdb_write_register_apple(CPUState *cs, uint8_t *mem_buf, int n) env->ctr = ldq_p(mem_buf); break; case 69 + 32: - env->xer = ldl_p(mem_buf); + cpu_write_xer(env, ldl_p(mem_buf)); break; case 70 + 32: /* fpscr */ diff --git a/target/ppc/helper.h b/target/ppc/helper.h index 4076aa281e..627811cefc 100644 --- a/target/ppc/helper.h +++ b/target/ppc/helper.h @@ -46,7 +46,9 @@ DEF_HELPER_4(divwe, tl, env, tl, tl, i32) DEF_HELPER_FLAGS_1(popcntb, TCG_CALL_NO_RWG_SE, tl, tl) DEF_HELPER_FLAGS_2(cmpb, TCG_CALL_NO_RWG_SE, tl, tl, tl) DEF_HELPER_3(sraw, tl, env, tl, tl) -DEF_HELPER_FLAGS_2(cfuged, TCG_CALL_NO_RWG_SE, i64, i64, i64) +DEF_HELPER_FLAGS_2(CFUGED, TCG_CALL_NO_RWG_SE, i64, i64, i64) +DEF_HELPER_FLAGS_2(PDEPD, TCG_CALL_NO_RWG_SE, i64, i64, i64) +DEF_HELPER_FLAGS_2(PEXTD, TCG_CALL_NO_RWG_SE, i64, i64, i64) #if defined(TARGET_PPC64) DEF_HELPER_FLAGS_2(cmpeqb, TCG_CALL_NO_RWG_SE, i32, tl, tl) DEF_HELPER_FLAGS_1(popcntw, TCG_CALL_NO_RWG_SE, tl, tl) @@ -222,10 +224,10 @@ DEF_HELPER_3(vextractub, void, avr, avr, i32) DEF_HELPER_3(vextractuh, void, avr, avr, i32) DEF_HELPER_3(vextractuw, void, avr, avr, i32) DEF_HELPER_3(vextractd, void, avr, avr, i32) -DEF_HELPER_3(vinsertb, void, avr, avr, i32) -DEF_HELPER_3(vinserth, void, avr, avr, i32) -DEF_HELPER_3(vinsertw, void, avr, avr, i32) -DEF_HELPER_3(vinsertd, void, avr, avr, i32) +DEF_HELPER_4(VINSBLX, void, env, avr, i64, tl) +DEF_HELPER_4(VINSHLX, void, env, avr, i64, tl) +DEF_HELPER_4(VINSWLX, void, env, avr, i64, tl) +DEF_HELPER_4(VINSDLX, void, env, avr, i64, tl) DEF_HELPER_2(vextsb2w, void, avr, avr) DEF_HELPER_2(vextsh2w, void, avr, avr) DEF_HELPER_2(vextsb2d, void, avr, avr) @@ -332,6 +334,10 @@ DEF_HELPER_2(vextuwlx, tl, tl, avr) DEF_HELPER_2(vextubrx, tl, tl, avr) DEF_HELPER_2(vextuhrx, tl, tl, avr) DEF_HELPER_2(vextuwrx, tl, tl, avr) +DEF_HELPER_5(VEXTDUBVLX, void, env, avr, avr, avr, tl) +DEF_HELPER_5(VEXTDUHVLX, void, env, avr, avr, avr, tl) +DEF_HELPER_5(VEXTDUWVLX, void, env, avr, avr, avr, tl) +DEF_HELPER_5(VEXTDDVLX, void, env, avr, 
avr, avr, tl) DEF_HELPER_2(vsbox, void, avr, avr) DEF_HELPER_3(vcipher, void, avr, avr, avr) @@ -514,6 +520,10 @@ DEF_HELPER_4(xxpermr, void, env, vsr, vsr, vsr) DEF_HELPER_4(xxextractuw, void, env, vsr, vsr, i32) DEF_HELPER_4(xxinsertw, void, env, vsr, vsr, i32) DEF_HELPER_3(xvxsigsp, void, env, vsr, vsr) +DEF_HELPER_5(XXBLENDVB, void, vsr, vsr, vsr, vsr, i32) +DEF_HELPER_5(XXBLENDVH, void, vsr, vsr, vsr, vsr, i32) +DEF_HELPER_5(XXBLENDVW, void, vsr, vsr, vsr, vsr, i32) +DEF_HELPER_5(XXBLENDVD, void, vsr, vsr, vsr, vsr, i32) DEF_HELPER_2(efscfsi, i32, env, i32) DEF_HELPER_2(efscfui, i32, env, i32) @@ -696,58 +706,60 @@ DEF_HELPER_3(store_601_batu, void, env, i32, tl) #define dh_alias_fprp ptr #define dh_ctype_fprp ppc_fprp_t * -DEF_HELPER_4(dadd, void, env, fprp, fprp, fprp) -DEF_HELPER_4(daddq, void, env, fprp, fprp, fprp) -DEF_HELPER_4(dsub, void, env, fprp, fprp, fprp) -DEF_HELPER_4(dsubq, void, env, fprp, fprp, fprp) -DEF_HELPER_4(dmul, void, env, fprp, fprp, fprp) -DEF_HELPER_4(dmulq, void, env, fprp, fprp, fprp) -DEF_HELPER_4(ddiv, void, env, fprp, fprp, fprp) -DEF_HELPER_4(ddivq, void, env, fprp, fprp, fprp) -DEF_HELPER_3(dcmpo, i32, env, fprp, fprp) -DEF_HELPER_3(dcmpoq, i32, env, fprp, fprp) -DEF_HELPER_3(dcmpu, i32, env, fprp, fprp) -DEF_HELPER_3(dcmpuq, i32, env, fprp, fprp) -DEF_HELPER_3(dtstdc, i32, env, fprp, i32) -DEF_HELPER_3(dtstdcq, i32, env, fprp, i32) -DEF_HELPER_3(dtstdg, i32, env, fprp, i32) -DEF_HELPER_3(dtstdgq, i32, env, fprp, i32) -DEF_HELPER_3(dtstex, i32, env, fprp, fprp) -DEF_HELPER_3(dtstexq, i32, env, fprp, fprp) -DEF_HELPER_3(dtstsf, i32, env, fprp, fprp) -DEF_HELPER_3(dtstsfq, i32, env, fprp, fprp) -DEF_HELPER_3(dtstsfi, i32, env, i32, fprp) -DEF_HELPER_3(dtstsfiq, i32, env, i32, fprp) -DEF_HELPER_5(dquai, void, env, fprp, fprp, i32, i32) -DEF_HELPER_5(dquaiq, void, env, fprp, fprp, i32, i32) -DEF_HELPER_5(dqua, void, env, fprp, fprp, fprp, i32) -DEF_HELPER_5(dquaq, void, env, fprp, fprp, fprp, i32) -DEF_HELPER_5(drrnd, void, env, fprp, fprp, fprp, i32) -DEF_HELPER_5(drrndq, void, env, fprp, fprp, fprp, i32) -DEF_HELPER_5(drintx, void, env, fprp, fprp, i32, i32) -DEF_HELPER_5(drintxq, void, env, fprp, fprp, i32, i32) -DEF_HELPER_5(drintn, void, env, fprp, fprp, i32, i32) -DEF_HELPER_5(drintnq, void, env, fprp, fprp, i32, i32) -DEF_HELPER_3(dctdp, void, env, fprp, fprp) -DEF_HELPER_3(dctqpq, void, env, fprp, fprp) -DEF_HELPER_3(drsp, void, env, fprp, fprp) -DEF_HELPER_3(drdpq, void, env, fprp, fprp) -DEF_HELPER_3(dcffix, void, env, fprp, fprp) -DEF_HELPER_3(dcffixq, void, env, fprp, fprp) -DEF_HELPER_3(dctfix, void, env, fprp, fprp) -DEF_HELPER_3(dctfixq, void, env, fprp, fprp) -DEF_HELPER_4(ddedpd, void, env, fprp, fprp, i32) -DEF_HELPER_4(ddedpdq, void, env, fprp, fprp, i32) -DEF_HELPER_4(denbcd, void, env, fprp, fprp, i32) -DEF_HELPER_4(denbcdq, void, env, fprp, fprp, i32) -DEF_HELPER_3(dxex, void, env, fprp, fprp) -DEF_HELPER_3(dxexq, void, env, fprp, fprp) -DEF_HELPER_4(diex, void, env, fprp, fprp, fprp) -DEF_HELPER_4(diexq, void, env, fprp, fprp, fprp) -DEF_HELPER_4(dscri, void, env, fprp, fprp, i32) -DEF_HELPER_4(dscriq, void, env, fprp, fprp, i32) -DEF_HELPER_4(dscli, void, env, fprp, fprp, i32) -DEF_HELPER_4(dscliq, void, env, fprp, fprp, i32) +DEF_HELPER_4(DADD, void, env, fprp, fprp, fprp) +DEF_HELPER_4(DADDQ, void, env, fprp, fprp, fprp) +DEF_HELPER_4(DSUB, void, env, fprp, fprp, fprp) +DEF_HELPER_4(DSUBQ, void, env, fprp, fprp, fprp) +DEF_HELPER_4(DMUL, void, env, fprp, fprp, fprp) +DEF_HELPER_4(DMULQ, void, env, fprp, fprp, fprp) 
+DEF_HELPER_4(DDIV, void, env, fprp, fprp, fprp) +DEF_HELPER_4(DDIVQ, void, env, fprp, fprp, fprp) +DEF_HELPER_3(DCMPO, i32, env, fprp, fprp) +DEF_HELPER_3(DCMPOQ, i32, env, fprp, fprp) +DEF_HELPER_3(DCMPU, i32, env, fprp, fprp) +DEF_HELPER_3(DCMPUQ, i32, env, fprp, fprp) +DEF_HELPER_3(DTSTDC, i32, env, fprp, i32) +DEF_HELPER_3(DTSTDCQ, i32, env, fprp, i32) +DEF_HELPER_3(DTSTDG, i32, env, fprp, i32) +DEF_HELPER_3(DTSTDGQ, i32, env, fprp, i32) +DEF_HELPER_3(DTSTEX, i32, env, fprp, fprp) +DEF_HELPER_3(DTSTEXQ, i32, env, fprp, fprp) +DEF_HELPER_3(DTSTSF, i32, env, fprp, fprp) +DEF_HELPER_3(DTSTSFQ, i32, env, fprp, fprp) +DEF_HELPER_3(DTSTSFI, i32, env, i32, fprp) +DEF_HELPER_3(DTSTSFIQ, i32, env, i32, fprp) +DEF_HELPER_5(DQUAI, void, env, fprp, fprp, i32, i32) +DEF_HELPER_5(DQUAIQ, void, env, fprp, fprp, i32, i32) +DEF_HELPER_5(DQUA, void, env, fprp, fprp, fprp, i32) +DEF_HELPER_5(DQUAQ, void, env, fprp, fprp, fprp, i32) +DEF_HELPER_5(DRRND, void, env, fprp, fprp, fprp, i32) +DEF_HELPER_5(DRRNDQ, void, env, fprp, fprp, fprp, i32) +DEF_HELPER_5(DRINTX, void, env, fprp, fprp, i32, i32) +DEF_HELPER_5(DRINTXQ, void, env, fprp, fprp, i32, i32) +DEF_HELPER_5(DRINTN, void, env, fprp, fprp, i32, i32) +DEF_HELPER_5(DRINTNQ, void, env, fprp, fprp, i32, i32) +DEF_HELPER_3(DCTDP, void, env, fprp, fprp) +DEF_HELPER_3(DCTQPQ, void, env, fprp, fprp) +DEF_HELPER_3(DRSP, void, env, fprp, fprp) +DEF_HELPER_3(DRDPQ, void, env, fprp, fprp) +DEF_HELPER_3(DCFFIX, void, env, fprp, fprp) +DEF_HELPER_3(DCFFIXQ, void, env, fprp, fprp) +DEF_HELPER_3(DCFFIXQQ, void, env, fprp, avr) +DEF_HELPER_3(DCTFIX, void, env, fprp, fprp) +DEF_HELPER_3(DCTFIXQ, void, env, fprp, fprp) +DEF_HELPER_3(DCTFIXQQ, void, env, avr, fprp) +DEF_HELPER_4(DDEDPD, void, env, fprp, fprp, i32) +DEF_HELPER_4(DDEDPDQ, void, env, fprp, fprp, i32) +DEF_HELPER_4(DENBCD, void, env, fprp, fprp, i32) +DEF_HELPER_4(DENBCDQ, void, env, fprp, fprp, i32) +DEF_HELPER_3(DXEX, void, env, fprp, fprp) +DEF_HELPER_3(DXEXQ, void, env, fprp, fprp) +DEF_HELPER_4(DIEX, void, env, fprp, fprp, fprp) +DEF_HELPER_4(DIEXQ, void, env, fprp, fprp, fprp) +DEF_HELPER_4(DSCRI, void, env, fprp, fprp, i32) +DEF_HELPER_4(DSCRIQ, void, env, fprp, fprp, i32) +DEF_HELPER_4(DSCLI, void, env, fprp, fprp, i32) +DEF_HELPER_4(DSCLIQ, void, env, fprp, fprp, i32) DEF_HELPER_1(tbegin, void, env) DEF_HELPER_FLAGS_1(fixup_thrm, TCG_CALL_NO_RWG, void, env) diff --git a/target/ppc/helper_regs.c b/target/ppc/helper_regs.c index 405450d863..99562edd57 100644 --- a/target/ppc/helper_regs.c +++ b/target/ppc/helper_regs.c @@ -106,6 +106,15 @@ static uint32_t hreg_compute_hflags_value(CPUPPCState *env) if (env->spr[SPR_LPCR] & LPCR_GTSE) { hflags |= 1 << HFLAGS_GTSE; } + if (env->spr[SPR_LPCR] & LPCR_HR) { + hflags |= 1 << HFLAGS_HR; + } + if (env->spr[SPR_POWER_MMCR0] & MMCR0_PMCC0) { + hflags |= 1 << HFLAGS_PMCC0; + } + if (env->spr[SPR_POWER_MMCR0] & MMCR0_PMCC1) { + hflags |= 1 << HFLAGS_PMCC1; + } #ifndef CONFIG_USER_ONLY if (!env->has_hv_mode || (msr & (1ull << MSR_HV))) { diff --git a/target/ppc/insn32.decode b/target/ppc/insn32.decode index 9fd8d6b817..e135b8aba4 100644 --- a/target/ppc/insn32.decode +++ b/target/ppc/insn32.decode @@ -24,25 +24,142 @@ @D_bfs ...... bf:3 - l:1 ra:5 imm:s16 &D_bf @D_bfu ...... bf:3 - l:1 ra:5 imm:16 &D_bf +%dq_si 4:s12 !function=times_16 +%dq_rtp 22:4 !function=times_2 +@DQ_rtp ...... ....0 ra:5 ............ .... &D rt=%dq_rtp si=%dq_si + +%dq_rt_tsx 3:1 21:5 +@DQ_TSX ...... ..... ra:5 ............ .... 
&D si=%dq_si rt=%dq_rt_tsx + +%rt_tsxp 21:1 22:4 !function=times_2 +@DQ_TSXP ...... ..... ra:5 ............ .... &D si=%dq_si rt=%rt_tsxp + %ds_si 2:s14 !function=times_4 @DS ...... rt:5 ra:5 .............. .. &D si=%ds_si +%ds_rtp 22:4 !function=times_2 +@DS_rtp ...... ....0 ra:5 .............. .. &D rt=%ds_rtp si=%ds_si + &DX rt d %dx_d 6:s10 16:5 0:1 @DX ...... rt:5 ..... .......... ..... . &DX d=%dx_d +&VA vrt vra vrb rc +@VA ...... vrt:5 vra:5 vrb:5 rc:5 ...... &VA + +&VN vrt vra vrb sh +@VN ...... vrt:5 vra:5 vrb:5 .. sh:3 ...... &VN + &VX vrt vra vrb @VX ...... vrt:5 vra:5 vrb:5 .......... . &VX +&VX_uim4 vrt uim vrb +@VX_uim4 ...... vrt:5 . uim:4 vrb:5 ........... &VX_uim4 + &X rt ra rb @X ...... rt:5 ra:5 rb:5 .......... . &X +&X_rc rt ra rb rc:bool +@X_rc ...... rt:5 ra:5 rb:5 .......... rc:1 &X_rc + +%x_frtp 22:4 !function=times_2 +%x_frap 17:4 !function=times_2 +%x_frbp 12:4 !function=times_2 +@X_tp_ap_bp_rc ...... ....0 ....0 ....0 .......... rc:1 &X_rc rt=%x_frtp ra=%x_frap rb=%x_frbp + +@X_tp_a_bp_rc ...... ....0 ra:5 ....0 .......... rc:1 &X_rc rt=%x_frtp rb=%x_frbp + +&X_tb_rc rt rb rc:bool +@X_tb_rc ...... rt:5 ..... rb:5 .......... rc:1 &X_tb_rc + +@X_tbp_rc ...... ....0 ..... ....0 .......... rc:1 &X_tb_rc rt=%x_frtp rb=%x_frbp + +@X_tp_b_rc ...... ....0 ..... rb:5 .......... rc:1 &X_tb_rc rt=%x_frtp + +@X_t_bp_rc ...... rt:5 ..... ....0 .......... rc:1 &X_tb_rc rb=%x_frbp + &X_bi rt bi @X_bi ...... rt:5 bi:5 ----- .......... - &X_bi +&X_bf bf ra rb +@X_bf ...... bf:3 .. ra:5 rb:5 .......... . &X_bf + +@X_bf_ap_bp ...... bf:3 .. ....0 ....0 .......... . &X_bf ra=%x_frap rb=%x_frbp + +@X_bf_a_bp ...... bf:3 .. ra:5 ....0 .......... . &X_bf rb=%x_frbp + +&X_bf_uim bf uim rb +@X_bf_uim ...... bf:3 . uim:6 rb:5 .......... . &X_bf_uim + +@X_bf_uim_bp ...... bf:3 . uim:6 ....0 .......... . &X_bf_uim rb=%x_frbp + &X_bfl bf l:bool ra rb @X_bfl ...... bf:3 - l:1 ra:5 rb:5 ..........- &X_bfl +%x_xt 0:1 21:5 +&X_imm8 xt imm:uint8_t +@X_imm8 ...... ..... .. imm:8 .......... . &X_imm8 xt=%x_xt + +&X_uim5 xt uim:uint8_t +@X_uim5 ...... ..... ..... uim:5 .......... . &X_uim5 xt=%x_xt + +&X_tb_sp_rc rt rb sp rc:bool +@X_tb_sp_rc ...... rt:5 sp:2 ... rb:5 .......... rc:1 &X_tb_sp_rc + +@X_tbp_sp_rc ...... ....0 sp:2 ... ....0 .......... rc:1 &X_tb_sp_rc rt=%x_frtp rb=%x_frbp + +&X_tb_s_rc rt rb s:bool rc:bool +@X_tb_s_rc ...... rt:5 s:1 .... rb:5 .......... rc:1 &X_tb_s_rc + +@X_tbp_s_rc ...... ....0 s:1 .... ....0 .......... rc:1 &X_tb_s_rc rt=%x_frtp rb=%x_frbp + +%x_rt_tsx 0:1 21:5 +@X_TSX ...... ..... ra:5 rb:5 .......... . &X rt=%x_rt_tsx +@X_TSXP ...... ..... ra:5 rb:5 .......... . &X rt=%rt_tsxp + +&X_frtp_vrb frtp vrb +@X_frtp_vrb ...... ....0 ..... vrb:5 .......... . &X_frtp_vrb frtp=%x_frtp + +&X_vrt_frbp vrt frbp +@X_vrt_frbp ...... vrt:5 ..... ....0 .......... . &X_vrt_frbp frbp=%x_frbp + +&XX2 xt xb uim:uint8_t +%xx2_xt 0:1 21:5 +%xx2_xb 1:1 11:5 +@XX2 ...... ..... ... uim:2 ..... ......... .. &XX2 xt=%xx2_xt xb=%xx2_xb + +&Z22_bf_fra bf fra dm +@Z22_bf_fra ...... bf:3 .. fra:5 dm:6 ......... . &Z22_bf_fra + +%z22_frap 17:4 !function=times_2 +@Z22_bf_frap ...... bf:3 .. ....0 dm:6 ......... . &Z22_bf_fra fra=%z22_frap + +&Z22_ta_sh_rc rt ra sh rc:bool +@Z22_ta_sh_rc ...... rt:5 ra:5 sh:6 ......... rc:1 &Z22_ta_sh_rc + +%z22_frtp 22:4 !function=times_2 +@Z22_tap_sh_rc ...... ....0 ....0 sh:6 ......... rc:1 &Z22_ta_sh_rc rt=%z22_frtp ra=%z22_frap + +&Z23_tab frt fra frb rmc rc:bool +@Z23_tab ...... frt:5 fra:5 frb:5 rmc:2 ........ 
rc:1 &Z23_tab + +%z23_frtp 22:4 !function=times_2 +%z23_frap 17:4 !function=times_2 +%z23_frbp 12:4 !function=times_2 +@Z23_tabp ...... ....0 ....0 ....0 rmc:2 ........ rc:1 &Z23_tab frt=%z23_frtp fra=%z23_frap frb=%z23_frbp + +@Z23_tp_a_bp ...... ....0 fra:5 ....0 rmc:2 ........ rc:1 &Z23_tab frt=%z23_frtp frb=%z23_frbp + +&Z23_tb frt frb r:bool rmc rc:bool +@Z23_tb ...... frt:5 .... r:1 frb:5 rmc:2 ........ rc:1 &Z23_tb + +@Z23_tbp ...... ....0 .... r:1 ....0 rmc:2 ........ rc:1 &Z23_tb frt=%z23_frtp frb=%z23_frbp + +&Z23_te_tb te frt frb rmc rc:bool +@Z23_te_tb ...... frt:5 te:5 frb:5 rmc:2 ........ rc:1 &Z23_te_tb + +@Z23_te_tbp ...... ....0 te:5 ....0 rmc:2 ........ rc:1 &Z23_te_tb frt=%z23_frtp frb=%z23_frbp + ### Fixed-Point Load Instructions LBZ 100010 ..... ..... ................ @D @@ -74,6 +191,8 @@ LDU 111010 ..... ..... ..............01 @DS LDX 011111 ..... ..... ..... 0000010101 - @X LDUX 011111 ..... ..... ..... 0000110101 - @X +LQ 111000 ..... ..... ............ ---- @DQ_rtp + ### Fixed-Point Store Instructions STB 100110 ..... ..... ................ @D @@ -96,6 +215,8 @@ STDU 111110 ..... ..... ..............01 @DS STDX 011111 ..... ..... ..... 0010010101 - @X STDUX 011111 ..... ..... ..... 0010110101 - @X +STQ 111110 ..... ..... ..............10 @DS_rtp + ### Fixed-Point Compare Instructions CMP 011111 ... - . ..... ..... 0000000000 - @X_bfl @@ -113,6 +234,34 @@ ADDPCIS 010011 ..... ..... .......... 00010 . @DX ## Fixed-Point Logical Instructions CFUGED 011111 ..... ..... ..... 0011011100 - @X +CNTLZDM 011111 ..... ..... ..... 0000111011 - @X +CNTTZDM 011111 ..... ..... ..... 1000111011 - @X +PDEPD 011111 ..... ..... ..... 0010011100 - @X +PEXTD 011111 ..... ..... ..... 0010111100 - @X + +### Float-Point Load Instructions + +LFS 110000 ..... ..... ................ @D +LFSU 110001 ..... ..... ................ @D +LFSX 011111 ..... ..... ..... 1000010111 - @X +LFSUX 011111 ..... ..... ..... 1000110111 - @X + +LFD 110010 ..... ..... ................ @D +LFDU 110011 ..... ..... ................ @D +LFDX 011111 ..... ..... ..... 1001010111 - @X +LFDUX 011111 ..... ..... ..... 1001110111 - @X + +### Float-Point Store Instructions + +STFS 110100 ..... ...... ............... @D +STFSU 110101 ..... ...... ............... @D +STFSX 011111 ..... ...... .... 1010010111 - @X +STFSUX 011111 ..... ...... .... 1010110111 - @X + +STFD 110110 ..... ...... ............... @D +STFDU 110111 ..... ...... ............... @D +STFDX 011111 ..... ...... .... 1011010111 - @X +STFDUX 011111 ..... ...... .... 1011110111 - @X ### Move To/From System Register Instructions @@ -121,6 +270,160 @@ SETBCR 011111 ..... ..... ----- 0110100000 - @X_bi SETNBC 011111 ..... ..... ----- 0111000000 - @X_bi SETNBCR 011111 ..... ..... ----- 0111100000 - @X_bi +### Decimal Floating-Point Arithmetic Instructions + +DADD 111011 ..... ..... ..... 0000000010 . @X_rc +DADDQ 111111 ..... ..... ..... 0000000010 . @X_tp_ap_bp_rc + +DSUB 111011 ..... ..... ..... 1000000010 . @X_rc +DSUBQ 111111 ..... ..... ..... 1000000010 . @X_tp_ap_bp_rc + +DMUL 111011 ..... ..... ..... 0000100010 . @X_rc +DMULQ 111111 ..... ..... ..... 0000100010 . @X_tp_ap_bp_rc + +DDIV 111011 ..... ..... ..... 1000100010 . @X_rc +DDIVQ 111111 ..... ..... ..... 1000100010 . @X_tp_ap_bp_rc + +### Decimal Floating-Point Compare Instructions + +DCMPU 111011 ... -- ..... ..... 1010000010 - @X_bf +DCMPUQ 111111 ... -- ..... ..... 1010000010 - @X_bf_ap_bp + +DCMPO 111011 ... -- ..... ..... 0010000010 - @X_bf +DCMPOQ 111111 ... -- ..... ..... 
0010000010 - @X_bf_ap_bp + +### Decimal Floating-Point Test Instructions + +DTSTDC 111011 ... -- ..... ...... 011000010 - @Z22_bf_fra +DTSTDCQ 111111 ... -- ..... ...... 011000010 - @Z22_bf_frap + +DTSTDG 111011 ... -- ..... ...... 011100010 - @Z22_bf_fra +DTSTDGQ 111111 ... -- ..... ...... 011100010 - @Z22_bf_frap + +DTSTEX 111011 ... -- ..... ..... 0010100010 - @X_bf +DTSTEXQ 111111 ... -- ..... ..... 0010100010 - @X_bf_ap_bp + +DTSTSF 111011 ... -- ..... ..... 1010100010 - @X_bf +DTSTSFQ 111111 ... -- ..... ..... 1010100010 - @X_bf_a_bp + +DTSTSFI 111011 ... - ...... ..... 1010100011 - @X_bf_uim +DTSTSFIQ 111111 ... - ...... ..... 1010100011 - @X_bf_uim_bp + +### Decimal Floating-Point Quantum Adjustment Instructions + +DQUAI 111011 ..... ..... ..... .. 01000011 . @Z23_te_tb +DQUAIQ 111111 ..... ..... ..... .. 01000011 . @Z23_te_tbp + +DQUA 111011 ..... ..... ..... .. 00000011 . @Z23_tab +DQUAQ 111111 ..... ..... ..... .. 00000011 . @Z23_tabp + +DRRND 111011 ..... ..... ..... .. 00100011 . @Z23_tab +DRRNDQ 111111 ..... ..... ..... .. 00100011 . @Z23_tp_a_bp + +DRINTX 111011 ..... ---- . ..... .. 01100011 . @Z23_tb +DRINTXQ 111111 ..... ---- . ..... .. 01100011 . @Z23_tbp + +DRINTN 111011 ..... ---- . ..... .. 11100011 . @Z23_tb +DRINTNQ 111111 ..... ---- . ..... .. 11100011 . @Z23_tbp + +### Decimal Floating-Point Conversion Instructions + +DCTDP 111011 ..... ----- ..... 0100000010 . @X_tb_rc +DCTQPQ 111111 ..... ----- ..... 0100000010 . @X_tp_b_rc + +DRSP 111011 ..... ----- ..... 1100000010 . @X_tb_rc +DRDPQ 111111 ..... ----- ..... 1100000010 . @X_tbp_rc + +DCFFIX 111011 ..... ----- ..... 1100100010 . @X_tb_rc +DCFFIXQ 111111 ..... ----- ..... 1100100010 . @X_tp_b_rc +DCFFIXQQ 111111 ..... 00000 ..... 1111100010 - @X_frtp_vrb + +DCTFIX 111011 ..... ----- ..... 0100100010 . @X_tb_rc +DCTFIXQ 111111 ..... ----- ..... 0100100010 . @X_t_bp_rc +DCTFIXQQ 111111 ..... 00001 ..... 1111100010 - @X_vrt_frbp + +### Decimal Floating-Point Format Instructions + +DDEDPD 111011 ..... .. --- ..... 0101000010 . @X_tb_sp_rc +DDEDPDQ 111111 ..... .. --- ..... 0101000010 . @X_tbp_sp_rc + +DENBCD 111011 ..... . ---- ..... 1101000010 . @X_tb_s_rc +DENBCDQ 111111 ..... . ---- ..... 1101000010 . @X_tbp_s_rc + +DXEX 111011 ..... ----- ..... 0101100010 . @X_tb_rc +DXEXQ 111111 ..... ----- ..... 0101100010 . @X_t_bp_rc + +DIEX 111011 ..... ..... ..... 1101100010 . @X_rc +DIEXQ 111111 ..... ..... ..... 1101100010 . @X_tp_a_bp_rc + +DSCLI 111011 ..... ..... ...... 001000010 . @Z22_ta_sh_rc +DSCLIQ 111111 ..... ..... ...... 001000010 . @Z22_tap_sh_rc + +DSCRI 111011 ..... ..... ...... 001100010 . @Z22_ta_sh_rc +DSCRIQ 111111 ..... ..... ...... 001100010 . @Z22_tap_sh_rc + ## Vector Bit Manipulation Instruction VCFUGED 000100 ..... ..... ..... 10101001101 @VX +VCLZDM 000100 ..... ..... ..... 11110000100 @VX +VCTZDM 000100 ..... ..... ..... 11111000100 @VX +VPDEPD 000100 ..... ..... ..... 10111001101 @VX +VPEXTD 000100 ..... ..... ..... 10110001101 @VX + +## Vector Permute and Formatting Instruction + +VEXTDUBVLX 000100 ..... ..... ..... ..... 011000 @VA +VEXTDUBVRX 000100 ..... ..... ..... ..... 011001 @VA +VEXTDUHVLX 000100 ..... ..... ..... ..... 011010 @VA +VEXTDUHVRX 000100 ..... ..... ..... ..... 011011 @VA +VEXTDUWVLX 000100 ..... ..... ..... ..... 011100 @VA +VEXTDUWVRX 000100 ..... ..... ..... ..... 011101 @VA +VEXTDDVLX 000100 ..... ..... ..... ..... 011110 @VA +VEXTDDVRX 000100 ..... ..... ..... ..... 011111 @VA + +VINSERTB 000100 ..... - .... ..... 01100001101 @VX_uim4 +VINSERTH 000100 ..... - .... ..... 
01101001101 @VX_uim4 +VINSERTW 000100 ..... - .... ..... 01110001101 @VX_uim4 +VINSERTD 000100 ..... - .... ..... 01111001101 @VX_uim4 + +VINSBLX 000100 ..... ..... ..... 01000001111 @VX +VINSBRX 000100 ..... ..... ..... 01100001111 @VX +VINSHLX 000100 ..... ..... ..... 01001001111 @VX +VINSHRX 000100 ..... ..... ..... 01101001111 @VX +VINSWLX 000100 ..... ..... ..... 01010001111 @VX +VINSWRX 000100 ..... ..... ..... 01110001111 @VX +VINSDLX 000100 ..... ..... ..... 01011001111 @VX +VINSDRX 000100 ..... ..... ..... 01111001111 @VX + +VINSW 000100 ..... - .... ..... 00011001111 @VX_uim4 +VINSD 000100 ..... - .... ..... 00111001111 @VX_uim4 + +VINSBVLX 000100 ..... ..... ..... 00000001111 @VX +VINSBVRX 000100 ..... ..... ..... 00100001111 @VX +VINSHVLX 000100 ..... ..... ..... 00001001111 @VX +VINSHVRX 000100 ..... ..... ..... 00101001111 @VX +VINSWVLX 000100 ..... ..... ..... 00010001111 @VX +VINSWVRX 000100 ..... ..... ..... 00110001111 @VX + +VSLDBI 000100 ..... ..... ..... 00 ... 010110 @VN +VSRDBI 000100 ..... ..... ..... 01 ... 010110 @VN + +# VSX Load/Store Instructions + +LXV 111101 ..... ..... ............ . 001 @DQ_TSX +STXV 111101 ..... ..... ............ . 101 @DQ_TSX +LXVP 000110 ..... ..... ............ 0000 @DQ_TSXP +STXVP 000110 ..... ..... ............ 0001 @DQ_TSXP +LXVX 011111 ..... ..... ..... 0100 - 01100 . @X_TSX +STXVX 011111 ..... ..... ..... 0110001100 . @X_TSX +LXVPX 011111 ..... ..... ..... 0101001101 - @X_TSXP +STXVPX 011111 ..... ..... ..... 0111001101 - @X_TSXP + +## VSX splat instruction + +XXSPLTIB 111100 ..... 00 ........ 0101101000 . @X_imm8 +XXSPLTW 111100 ..... ---.. ..... 010100100 . . @XX2 + +## VSX Vector Load Special Value Instruction + +LXVKQ 111100 ..... 11111 ..... 0101101000 . @X_uim5 diff --git a/target/ppc/insn64.decode b/target/ppc/insn64.decode index 72c5944a53..39e610913d 100644 --- a/target/ppc/insn64.decode +++ b/target/ppc/insn64.decode @@ -23,6 +23,36 @@ @PLS_D ...... .. ... r:1 .. .................. \ ...... rt:5 ra:5 ................ \ &PLS_D si=%pls_si +@8LS_D_TSX ...... .. . .. r:1 .. .................. \ + ..... rt:6 ra:5 ................ \ + &PLS_D si=%pls_si + +%rt_tsxp 21:1 22:4 !function=times_2 +@8LS_D_TSXP ...... .. . .. r:1 .. .................. \ + ...... ..... ra:5 ................ \ + &PLS_D si=%pls_si rt=%rt_tsxp + +# Format 8RR:D +%8rr_si 32:s16 0:16 +%8rr_xt 16:1 21:5 +&8RR_D_IX xt ix si +@8RR_D_IX ...... .. .... .. .. ................ \ + ...... ..... ... ix:1 . ................ \ + &8RR_D_IX si=%8rr_si xt=%8rr_xt +&8RR_D xt si:int32_t +@8RR_D ...... .. .... .. .. ................ \ + ...... ..... .... . ................ \ + &8RR_D si=%8rr_si xt=%8rr_xt + +# Format XX4 +&XX4 xt xa xb xc +%xx4_xt 0:1 21:5 +%xx4_xa 2:1 16:5 +%xx4_xb 1:1 11:5 +%xx4_xc 3:1 6:5 +@XX4 ........ ........ ........ ........ \ + ...... ..... ..... ..... ..... .. .... \ + &XX4 xt=%xx4_xt xa=%xx4_xa xb=%xx4_xb xc=%xx4_xc ### Fixed-Point Load Instructions @@ -38,6 +68,8 @@ PLWA 000001 00 0--.-- .................. \ 101001 ..... ..... ................ @PLS_D PLD 000001 00 0--.-- .................. \ 111001 ..... ..... ................ @PLS_D +PLQ 000001 00 0--.-- .................. \ + 111000 ..... ..... ................ @PLS_D ### Fixed-Point Store Instructions @@ -50,12 +82,25 @@ PSTH 000001 10 0--.-- .................. \ PSTD 000001 00 0--.-- .................. \ 111101 ..... ..... ................ @PLS_D +PSTQ 000001 00 0--.-- .................. \ + 111100 ..... ..... ................ 
@PLS_D ### Fixed-Point Arithmetic Instructions PADDI 000001 10 0--.-- .................. \ 001110 ..... ..... ................ @PLS_D +### Float-Point Load and Store Instructions + +PLFS 000001 10 0--.-- .................. \ + 110000 ..... ..... ................ @PLS_D +PLFD 000001 10 0--.-- .................. \ + 110010 ..... ..... ................ @PLS_D +PSTFS 000001 10 0--.-- .................. \ + 110100 ..... ..... ................ @PLS_D +PSTFD 000001 10 0--.-- .................. \ + 110110 ..... ..... ................ @PLS_D + ### Prefixed No-operation Instruction @PNOP 000001 11 0000-- 000000000000000000 \ @@ -122,3 +167,30 @@ PADDI 000001 10 0--.-- .................. \ PNOP ................................ \ -------------------------------- @PNOP } + +### VSX instructions + +PLXV 000001 00 0--.-- .................. \ + 11001 ...... ..... ................ @8LS_D_TSX +PSTXV 000001 00 0--.-- .................. \ + 11011 ...... ..... ................ @8LS_D_TSX +PLXVP 000001 00 0--.-- .................. \ + 111010 ..... ..... ................ @8LS_D_TSXP +PSTXVP 000001 00 0--.-- .................. \ + 111110 ..... ..... ................ @8LS_D_TSXP + +XXSPLTIDP 000001 01 0000 -- -- ................ \ + 100000 ..... 0010 . ................ @8RR_D +XXSPLTIW 000001 01 0000 -- -- ................ \ + 100000 ..... 0011 . ................ @8RR_D +XXSPLTI32DX 000001 01 0000 -- -- ................ \ + 100000 ..... 000 .. ................ @8RR_D_IX + +XXBLENDVD 000001 01 0000 -- ------------------ \ + 100001 ..... ..... ..... ..... 11 .... @XX4 +XXBLENDVW 000001 01 0000 -- ------------------ \ + 100001 ..... ..... ..... ..... 10 .... @XX4 +XXBLENDVH 000001 01 0000 -- ------------------ \ + 100001 ..... ..... ..... ..... 01 .... @XX4 +XXBLENDVB 000001 01 0000 -- ------------------ \ + 100001 ..... ..... ..... ..... 00 .... 
@XX4 diff --git a/target/ppc/int_helper.c b/target/ppc/int_helper.c index c2d3248d1e..9bc327bcba 100644 --- a/target/ppc/int_helper.c +++ b/target/ppc/int_helper.c @@ -104,10 +104,11 @@ uint64_t helper_divdeu(CPUPPCState *env, uint64_t ra, uint64_t rb, uint32_t oe) uint64_t rt = 0; int overflow = 0; - overflow = divu128(&rt, &ra, rb); - - if (unlikely(overflow)) { + if (unlikely(rb == 0 || ra >= rb)) { + overflow = 1; rt = 0; /* Undefined */ + } else { + divu128(&rt, &ra, rb); } if (oe) { @@ -119,13 +120,16 @@ uint64_t helper_divdeu(CPUPPCState *env, uint64_t ra, uint64_t rb, uint32_t oe) uint64_t helper_divde(CPUPPCState *env, uint64_t rau, uint64_t rbu, uint32_t oe) { - int64_t rt = 0; + uint64_t rt = 0; int64_t ra = (int64_t)rau; int64_t rb = (int64_t)rbu; - int overflow = divs128(&rt, &ra, rb); + int overflow = 0; - if (unlikely(overflow)) { + if (unlikely(rb == 0 || uabs64(ra) >= uabs64(rb))) { + overflow = 1; rt = 0; /* Undefined */ + } else { + divs128(&rt, &ra, rb); } if (oe) { @@ -320,7 +324,7 @@ target_ulong helper_popcntb(target_ulong val) } #endif -uint64_t helper_cfuged(uint64_t src, uint64_t mask) +uint64_t helper_CFUGED(uint64_t src, uint64_t mask) { /* * Instead of processing the mask bit-by-bit from the most significant to @@ -382,6 +386,42 @@ uint64_t helper_cfuged(uint64_t src, uint64_t mask) return left | (right >> n); } +uint64_t helper_PDEPD(uint64_t src, uint64_t mask) +{ + int i, o; + uint64_t result = 0; + + if (mask == -1) { + return src; + } + + for (i = 0; mask != 0; i++) { + o = ctz64(mask); + mask &= mask - 1; + result |= ((src >> i) & 1) << o; + } + + return result; +} + +uint64_t helper_PEXTD(uint64_t src, uint64_t mask) +{ + int i, o; + uint64_t result = 0; + + if (mask == -1) { + return src; + } + + for (o = 0; mask != 0; o++) { + i = ctz64(mask); + mask &= mask - 1; + result |= ((src >> i) & 1) << o; + } + + return result; +} + /*****************************************************************************/ /* PowerPC 601 specific instructions (POWER bridge) */ target_ulong helper_div(CPUPPCState *env, target_ulong arg1, target_ulong arg2) @@ -1573,25 +1613,73 @@ void helper_vslo(ppc_avr_t *r, ppc_avr_t *a, ppc_avr_t *b) } #if defined(HOST_WORDS_BIGENDIAN) -#define VINSERT(suffix, element) \ - void helper_vinsert##suffix(ppc_avr_t *r, ppc_avr_t *b, uint32_t index) \ - { \ - memmove(&r->u8[index], &b->u8[8 - sizeof(r->element[0])], \ - sizeof(r->element[0])); \ - } +#define ELEM_ADDR(VEC, IDX, SIZE) (&(VEC)->u8[IDX]) #else -#define VINSERT(suffix, element) \ - void helper_vinsert##suffix(ppc_avr_t *r, ppc_avr_t *b, uint32_t index) \ - { \ - uint32_t d = (16 - index) - sizeof(r->element[0]); \ - memmove(&r->u8[d], &b->u8[8], sizeof(r->element[0])); \ - } +#define ELEM_ADDR(VEC, IDX, SIZE) (&(VEC)->u8[15 - (IDX)] - (SIZE) + 1) #endif -VINSERT(b, u8) -VINSERT(h, u16) -VINSERT(w, u32) -VINSERT(d, u64) -#undef VINSERT + +#define VINSX(SUFFIX, TYPE) \ +void glue(glue(helper_VINS, SUFFIX), LX)(CPUPPCState *env, ppc_avr_t *t, \ + uint64_t val, target_ulong index) \ +{ \ + const int maxidx = ARRAY_SIZE(t->u8) - sizeof(TYPE); \ + target_long idx = index; \ + \ + if (idx < 0 || idx > maxidx) { \ + idx = idx < 0 ? 
sizeof(TYPE) - idx : idx; \ + qemu_log_mask(LOG_GUEST_ERROR, \ + "Invalid index for Vector Insert Element after 0x" TARGET_FMT_lx \ + ", RA = " TARGET_FMT_ld " > %d\n", env->nip, idx, maxidx); \ + } else { \ + TYPE src = val; \ + memcpy(ELEM_ADDR(t, idx, sizeof(TYPE)), &src, sizeof(TYPE)); \ + } \ +} +VINSX(B, uint8_t) +VINSX(H, uint16_t) +VINSX(W, uint32_t) +VINSX(D, uint64_t) +#undef ELEM_ADDR +#undef VINSX +#if defined(HOST_WORDS_BIGENDIAN) +#define VEXTDVLX(NAME, SIZE) \ +void helper_##NAME(CPUPPCState *env, ppc_avr_t *t, ppc_avr_t *a, ppc_avr_t *b, \ + target_ulong index) \ +{ \ + const target_long idx = index; \ + ppc_avr_t tmp[2] = { *a, *b }; \ + memset(t, 0, sizeof(*t)); \ + if (idx >= 0 && idx + SIZE <= sizeof(tmp)) { \ + memcpy(&t->u8[ARRAY_SIZE(t->u8) / 2 - SIZE], (void *)tmp + idx, SIZE); \ + } else { \ + qemu_log_mask(LOG_GUEST_ERROR, "Invalid index for " #NAME " after 0x" \ + TARGET_FMT_lx ", RC = " TARGET_FMT_ld " > %d\n", \ + env->nip, idx < 0 ? SIZE - idx : idx, 32 - SIZE); \ + } \ +} +#else +#define VEXTDVLX(NAME, SIZE) \ +void helper_##NAME(CPUPPCState *env, ppc_avr_t *t, ppc_avr_t *a, ppc_avr_t *b, \ + target_ulong index) \ +{ \ + const target_long idx = index; \ + ppc_avr_t tmp[2] = { *b, *a }; \ + memset(t, 0, sizeof(*t)); \ + if (idx >= 0 && idx + SIZE <= sizeof(tmp)) { \ + memcpy(&t->u8[ARRAY_SIZE(t->u8) / 2], \ + (void *)tmp + sizeof(tmp) - SIZE - idx, SIZE); \ + } else { \ + qemu_log_mask(LOG_GUEST_ERROR, "Invalid index for " #NAME " after 0x" \ + TARGET_FMT_lx ", RC = " TARGET_FMT_ld " > %d\n", \ + env->nip, idx < 0 ? SIZE - idx : idx, 32 - SIZE); \ + } \ +} +#endif +VEXTDVLX(VEXTDUBVLX, 1) +VEXTDVLX(VEXTDUHVLX, 2) +VEXTDVLX(VEXTDUWVLX, 4) +VEXTDVLX(VEXTDDVLX, 8) +#undef VEXTDVLX #if defined(HOST_WORDS_BIGENDIAN) #define VEXTRACT(suffix, element) \ void helper_vextract##suffix(ppc_avr_t *r, ppc_avr_t *b, uint32_t index) \ @@ -1649,6 +1737,21 @@ void helper_xxinsertw(CPUPPCState *env, ppc_vsr_t *xt, *xt = t; } +#define XXBLEND(name, sz) \ +void glue(helper_XXBLENDV, name)(ppc_avr_t *t, ppc_avr_t *a, ppc_avr_t *b, \ + ppc_avr_t *c, uint32_t desc) \ +{ \ + for (int i = 0; i < ARRAY_SIZE(t->glue(u, sz)); i++) { \ + t->glue(u, sz)[i] = (c->glue(s, sz)[i] >> (sz - 1)) ? \ + b->glue(u, sz)[i] : a->glue(u, sz)[i]; \ + } \ +} +XXBLEND(B, 8) +XXBLEND(H, 16) +XXBLEND(W, 32) +XXBLEND(D, 64) +#undef XXBLEND + #define VEXT_SIGNED(name, element, cast) \ void helper_##name(ppc_avr_t *r, ppc_avr_t *b) \ { \ @@ -2480,41 +2583,77 @@ uint32_t helper_bcdctz(ppc_avr_t *r, ppc_avr_t *b, uint32_t ps) return cr; } +/** + * Compare 2 128-bit unsigned integers, passed in as unsigned 64-bit pairs + * + * Returns: + * > 0 if ahi|alo > bhi|blo, + * 0 if ahi|alo == bhi|blo, + * < 0 if ahi|alo < bhi|blo + */ +static inline int ucmp128(uint64_t alo, uint64_t ahi, + uint64_t blo, uint64_t bhi) +{ + return (ahi == bhi) ? + (alo > blo ? 1 : (alo == blo ? 0 : -1)) : + (ahi > bhi ? 
1 : -1); +} + uint32_t helper_bcdcfsq(ppc_avr_t *r, ppc_avr_t *b, uint32_t ps) { int i; - int cr = 0; + int cr; uint64_t lo_value; uint64_t hi_value; + uint64_t rem; ppc_avr_t ret = { .u64 = { 0, 0 } }; if (b->VsrSD(0) < 0) { lo_value = -b->VsrSD(1); hi_value = ~b->VsrD(0) + !lo_value; bcd_put_digit(&ret, 0xD, 0); + + cr = CRF_LT; } else { lo_value = b->VsrD(1); hi_value = b->VsrD(0); bcd_put_digit(&ret, bcd_preferred_sgn(0, ps), 0); + + if (hi_value == 0 && lo_value == 0) { + cr = CRF_EQ; + } else { + cr = CRF_GT; + } } - if (divu128(&lo_value, &hi_value, 1000000000000000ULL) || - lo_value > 9999999999999999ULL) { - cr = CRF_SO; + /* + * Check src limits: abs(src) <= 10^31 - 1 + * + * 10^31 - 1 = 0x0000007e37be2022 c0914b267fffffff + */ + if (ucmp128(lo_value, hi_value, + 0xc0914b267fffffffULL, 0x7e37be2022ULL) > 0) { + cr |= CRF_SO; + + /* + * According to the ISA, if src wouldn't fit in the destination + * register, the result is undefined. + * In that case, we leave r unchanged. + */ + } else { + rem = divu128(&lo_value, &hi_value, 1000000000000000ULL); + + for (i = 1; i < 16; rem /= 10, i++) { + bcd_put_digit(&ret, rem % 10, i); + } + + for (; i < 32; lo_value /= 10, i++) { + bcd_put_digit(&ret, lo_value % 10, i); + } + + *r = ret; } - for (i = 1; i < 16; hi_value /= 10, i++) { - bcd_put_digit(&ret, hi_value % 10, i); - } - - for (; i < 32; lo_value /= 10, i++) { - bcd_put_digit(&ret, lo_value % 10, i); - } - - cr |= bcd_cmp_zero(&ret); - - *r = ret; - return cr; } diff --git a/target/ppc/internal.h b/target/ppc/internal.h index 55284369f5..6aa9484f34 100644 --- a/target/ppc/internal.h +++ b/target/ppc/internal.h @@ -211,11 +211,6 @@ void helper_compute_fprf_float16(CPUPPCState *env, float16 arg); void helper_compute_fprf_float32(CPUPPCState *env, float32 arg); void helper_compute_fprf_float128(CPUPPCState *env, float128 arg); -/* Raise a data fault alignment exception for the specified virtual address */ -void ppc_cpu_do_unaligned_access(CPUState *cs, vaddr addr, - MMUAccessType access_type, int mmu_idx, - uintptr_t retaddr) QEMU_NORETURN; - /* translate.c */ int ppc_fixup_cpu(PowerPCCPU *cpu); @@ -283,5 +278,17 @@ static inline void pte_invalidate(target_ulong *pte0) #define PTE_PTEM_MASK 0x7FFFFFBF #define PTE_CHECK_MASK (TARGET_PAGE_MASK | 0x7B) +#ifdef CONFIG_USER_ONLY +void ppc_cpu_record_sigsegv(CPUState *cs, vaddr addr, + MMUAccessType access_type, + bool maperr, uintptr_t ra); +#else +bool ppc_cpu_tlb_fill(CPUState *cs, vaddr address, int size, + MMUAccessType access_type, int mmu_idx, + bool probe, uintptr_t retaddr); +void ppc_cpu_do_unaligned_access(CPUState *cs, vaddr addr, + MMUAccessType access_type, int mmu_idx, + uintptr_t retaddr) QEMU_NORETURN; +#endif #endif /* PPC_INTERNAL_H */ diff --git a/target/ppc/mem_helper.c b/target/ppc/mem_helper.c index e2282baa8d..39945d9ea5 100644 --- a/target/ppc/mem_helper.c +++ b/target/ppc/mem_helper.c @@ -25,7 +25,6 @@ #include "exec/helper-proto.h" #include "helper_regs.h" #include "exec/cpu_ldst.h" -#include "tcg/tcg.h" #include "internal.h" #include "qemu/atomic128.h" diff --git a/target/ppc/mmu_helper.c b/target/ppc/mmu_helper.c index 2cb98c5169..e0c4950dda 100644 --- a/target/ppc/mmu_helper.c +++ b/target/ppc/mmu_helper.c @@ -1216,7 +1216,7 @@ void helper_booke206_tlbsx(CPUPPCState *env, target_ulong address) } static inline void booke206_invalidate_ea_tlb(CPUPPCState *env, int tlbn, - uint32_t ea) + vaddr ea) { int i; int ways = booke206_tlb_ways(env, tlbn); diff --git a/target/ppc/monitor.c b/target/ppc/monitor.c index 
a475108b2d..0b805ef6e9 100644 --- a/target/ppc/monitor.c +++ b/target/ppc/monitor.c @@ -44,6 +44,13 @@ static target_long monitor_get_ccr(Monitor *mon, const struct MonitorDef *md, return u; } +static target_long monitor_get_xer(Monitor *mon, const struct MonitorDef *md, + int val) +{ + CPUArchState *env = mon_get_cpu_env(mon); + return cpu_read_xer(env); +} + static target_long monitor_get_decr(Monitor *mon, const struct MonitorDef *md, int val) { @@ -85,7 +92,7 @@ const MonitorDef monitor_defs[] = { { "decr", 0, &monitor_get_decr, }, { "ccr|cr", 0, &monitor_get_ccr, }, /* Machine state register */ - { "xer", offsetof(CPUPPCState, xer) }, + { "xer", 0, &monitor_get_xer }, { "msr", offsetof(CPUPPCState, msr) }, { "tbu", 0, &monitor_get_tbu, }, { "tbl", 0, &monitor_get_tbl, }, diff --git a/target/ppc/power8-pmu-regs.c.inc b/target/ppc/power8-pmu-regs.c.inc new file mode 100644 index 0000000000..7391851238 --- /dev/null +++ b/target/ppc/power8-pmu-regs.c.inc @@ -0,0 +1,262 @@ +/* + * PMU register read/write functions for TCG IBM POWER chips + * + * Copyright IBM Corp. 2021 + * + * Authors: + * Daniel Henrique Barboza + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY) + +/* + * Checks whether the Group A SPR (MMCR0, MMCR2, MMCRA, and the + * PMCs) has problem state read access. + * + * Read acccess is granted for all PMCC values but 0b01, where a + * Facility Unavailable Interrupt will occur. + */ +static bool spr_groupA_read_allowed(DisasContext *ctx) +{ + if (!ctx->mmcr0_pmcc0 && ctx->mmcr0_pmcc1) { + gen_hvpriv_exception(ctx, POWERPC_EXCP_FU); + return false; + } + + return true; +} + +/* + * Checks whether the Group A SPR (MMCR0, MMCR2, MMCRA, and the + * PMCs) has problem state write access. + * + * Write acccess is granted for PMCC values 0b10 and 0b11. Userspace + * writing with PMCC 0b00 will generate a Hypervisor Emulation + * Assistance Interrupt. Userspace writing with PMCC 0b01 will + * generate a Facility Unavailable Interrupt. + */ +static bool spr_groupA_write_allowed(DisasContext *ctx) +{ + if (ctx->mmcr0_pmcc0) { + return true; + } + + if (ctx->mmcr0_pmcc1) { + /* PMCC = 0b01 */ + gen_hvpriv_exception(ctx, POWERPC_EXCP_FU); + } else { + /* PMCC = 0b00 */ + gen_hvpriv_exception(ctx, POWERPC_EXCP_INVAL_SPR); + } + + return false; +} + +/* + * Helper function to avoid code repetition between MMCR0 and + * MMCR2 problem state write functions. + * + * 'ret' must be tcg_temp_freed() by the caller. + */ +static TCGv masked_gprn_for_spr_write(int gprn, int sprn, + uint64_t spr_mask) +{ + TCGv ret = tcg_temp_new(); + TCGv t0 = tcg_temp_new(); + + /* 'ret' starts with all mask bits cleared */ + gen_load_spr(ret, sprn); + tcg_gen_andi_tl(ret, ret, ~(spr_mask)); + + /* Apply the mask into 'gprn' in a temp var */ + tcg_gen_andi_tl(t0, cpu_gpr[gprn], spr_mask); + + /* Add the masked gprn bits into 'ret' */ + tcg_gen_or_tl(ret, ret, t0); + + tcg_temp_free(t0); + + return ret; +} + +void spr_read_MMCR0_ureg(DisasContext *ctx, int gprn, int sprn) +{ + TCGv t0; + + if (!spr_groupA_read_allowed(ctx)) { + return; + } + + t0 = tcg_temp_new(); + + /* + * Filter out all bits but FC, PMAO, and PMAE, according + * to ISA v3.1, in 10.4.4 Monitor Mode Control Register 0, + * fourth paragraph. 
+ */ + gen_load_spr(t0, SPR_POWER_MMCR0); + tcg_gen_andi_tl(t0, t0, MMCR0_UREG_MASK); + tcg_gen_mov_tl(cpu_gpr[gprn], t0); + + tcg_temp_free(t0); +} + +void spr_write_MMCR0_ureg(DisasContext *ctx, int sprn, int gprn) +{ + TCGv masked_gprn; + + if (!spr_groupA_write_allowed(ctx)) { + return; + } + + /* + * Filter out all bits but FC, PMAO, and PMAE, according + * to ISA v3.1, in 10.4.4 Monitor Mode Control Register 0, + * fourth paragraph. + */ + masked_gprn = masked_gprn_for_spr_write(gprn, SPR_POWER_MMCR0, + MMCR0_UREG_MASK); + gen_store_spr(SPR_POWER_MMCR0, masked_gprn); + + tcg_temp_free(masked_gprn); +} + +void spr_read_MMCR2_ureg(DisasContext *ctx, int gprn, int sprn) +{ + TCGv t0; + + if (!spr_groupA_read_allowed(ctx)) { + return; + } + + t0 = tcg_temp_new(); + + /* + * On read, filter out all bits that are not FCnP0 bits. + * When MMCR0[PMCC] is set to 0b10 or 0b11, providing + * problem state programs read/write access to MMCR2, + * only the FCnP0 bits can be accessed. All other bits are + * not changed when mtspr is executed in problem state, and + * all other bits return 0s when mfspr is executed in problem + * state, according to ISA v3.1, section 10.4.6 Monitor Mode + * Control Register 2, p. 1316, third paragraph. + */ + gen_load_spr(t0, SPR_POWER_MMCR2); + tcg_gen_andi_tl(t0, t0, MMCR2_UREG_MASK); + tcg_gen_mov_tl(cpu_gpr[gprn], t0); + + tcg_temp_free(t0); +} + +void spr_write_MMCR2_ureg(DisasContext *ctx, int sprn, int gprn) +{ + TCGv masked_gprn; + + if (!spr_groupA_write_allowed(ctx)) { + return; + } + + /* + * Filter the bits that can be written using MMCR2_UREG_MASK, + * similar to what is done in spr_write_MMCR0_ureg(). + */ + masked_gprn = masked_gprn_for_spr_write(gprn, SPR_POWER_MMCR2, + MMCR2_UREG_MASK); + gen_store_spr(SPR_POWER_MMCR2, masked_gprn); + + tcg_temp_free(masked_gprn); +} + +void spr_read_PMC14_ureg(DisasContext *ctx, int gprn, int sprn) +{ + if (!spr_groupA_read_allowed(ctx)) { + return; + } + + spr_read_ureg(ctx, gprn, sprn); +} + +void spr_read_PMC56_ureg(DisasContext *ctx, int gprn, int sprn) +{ + /* + * If PMCC = 0b11, PMC5 and PMC6 aren't included in the Performance + * Monitor, and a read attempt results in a Facility Unavailable + * Interrupt. + */ + if (ctx->mmcr0_pmcc0 && ctx->mmcr0_pmcc1) { + gen_hvpriv_exception(ctx, POWERPC_EXCP_FU); + return; + } + + /* The remaining steps are similar to PMCs 1-4 userspace read */ + spr_read_PMC14_ureg(ctx, gprn, sprn); +} + +void spr_write_PMC14_ureg(DisasContext *ctx, int sprn, int gprn) +{ + if (!spr_groupA_write_allowed(ctx)) { + return; + } + + spr_write_ureg(ctx, sprn, gprn); +} + +void spr_write_PMC56_ureg(DisasContext *ctx, int sprn, int gprn) +{ + /* + * If PMCC = 0b11, PMC5 and PMC6 aren't included in the Performance + * Monitor, and a write attempt results in a Facility Unavailable + * Interrupt. 
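
Aside (illustrative only, not part of the patch): the problem-state access rules that spr_groupA_read_allowed()/spr_groupA_write_allowed() above encode via TCG exceptions boil down to a small decision table on MMCR0[PMCC]. A minimal plain-C sketch of that table follows; pmcc_read_ok()/pmcc_write_ok() are hypothetical names invented for this sketch and are not QEMU APIs.

#include <stdbool.h>
#include <stdio.h>

/* MMCR0[PMCC] is a 2-bit field; pmcc0 is the high bit, pmcc1 the low bit. */
static bool pmcc_read_ok(bool pmcc0, bool pmcc1)
{
    /* Group A reads are allowed for every PMCC value except 0b01. */
    return !(!pmcc0 && pmcc1);
}

static bool pmcc_write_ok(bool pmcc0, bool pmcc1)
{
    /* Group A writes are allowed only for PMCC = 0b10 or 0b11. */
    (void)pmcc1;
    return pmcc0;
}

int main(void)
{
    for (int v = 0; v < 4; v++) {
        bool pmcc0 = v & 2, pmcc1 = v & 1;
        printf("PMCC=0b%d%d read:%d write:%d\n", !!pmcc0, !!pmcc1,
               pmcc_read_ok(pmcc0, pmcc1), pmcc_write_ok(pmcc0, pmcc1));
    }
    return 0;
}
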
+ */ + if (ctx->mmcr0_pmcc0 && ctx->mmcr0_pmcc1) { + gen_hvpriv_exception(ctx, POWERPC_EXCP_FU); + return; + } + + /* The remaining steps are similar to PMCs 1-4 userspace write */ + spr_write_PMC14_ureg(ctx, sprn, gprn); +} +#else +void spr_read_MMCR0_ureg(DisasContext *ctx, int gprn, int sprn) +{ + spr_read_ureg(ctx, gprn, sprn); +} + +void spr_write_MMCR0_ureg(DisasContext *ctx, int sprn, int gprn) +{ + spr_noaccess(ctx, gprn, sprn); +} + +void spr_read_MMCR2_ureg(DisasContext *ctx, int gprn, int sprn) +{ + spr_read_ureg(ctx, gprn, sprn); +} + +void spr_write_MMCR2_ureg(DisasContext *ctx, int sprn, int gprn) +{ + spr_noaccess(ctx, gprn, sprn); +} + +void spr_read_PMC14_ureg(DisasContext *ctx, int gprn, int sprn) +{ + spr_read_ureg(ctx, gprn, sprn); +} + +void spr_read_PMC56_ureg(DisasContext *ctx, int gprn, int sprn) +{ + spr_read_ureg(ctx, gprn, sprn); +} + +void spr_write_PMC14_ureg(DisasContext *ctx, int sprn, int gprn) +{ + spr_noaccess(ctx, gprn, sprn); +} + +void spr_write_PMC56_ureg(DisasContext *ctx, int sprn, int gprn) +{ + spr_noaccess(ctx, gprn, sprn); +} +#endif /* defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY) */ diff --git a/target/ppc/spr_tcg.h b/target/ppc/spr_tcg.h index 0be5f347d5..520f1ef233 100644 --- a/target/ppc/spr_tcg.h +++ b/target/ppc/spr_tcg.h @@ -32,6 +32,10 @@ void spr_write_lr(DisasContext *ctx, int sprn, int gprn); void spr_read_ctr(DisasContext *ctx, int gprn, int sprn); void spr_write_ctr(DisasContext *ctx, int sprn, int gprn); void spr_read_ureg(DisasContext *ctx, int gprn, int sprn); +void spr_read_MMCR0_ureg(DisasContext *ctx, int gprn, int sprn); +void spr_read_MMCR2_ureg(DisasContext *ctx, int gprn, int sprn); +void spr_read_PMC14_ureg(DisasContext *ctx, int gprn, int sprn); +void spr_read_PMC56_ureg(DisasContext *ctx, int gprn, int sprn); void spr_read_tbl(DisasContext *ctx, int gprn, int sprn); void spr_read_tbu(DisasContext *ctx, int gprn, int sprn); void spr_read_atbl(DisasContext *ctx, int gprn, int sprn); @@ -40,6 +44,10 @@ void spr_read_601_rtcl(DisasContext *ctx, int gprn, int sprn); void spr_read_601_rtcu(DisasContext *ctx, int gprn, int sprn); void spr_read_spefscr(DisasContext *ctx, int gprn, int sprn); void spr_write_spefscr(DisasContext *ctx, int sprn, int gprn); +void spr_write_MMCR0_ureg(DisasContext *ctx, int sprn, int gprn); +void spr_write_MMCR2_ureg(DisasContext *ctx, int sprn, int gprn); +void spr_write_PMC14_ureg(DisasContext *ctx, int sprn, int gprn); +void spr_write_PMC56_ureg(DisasContext *ctx, int sprn, int gprn); #ifndef CONFIG_USER_ONLY void spr_write_generic32(DisasContext *ctx, int sprn, int gprn); diff --git a/target/ppc/trace-events b/target/ppc/trace-events index c88cfccf8d..53b107f56e 100644 --- a/target/ppc/trace-events +++ b/target/ppc/trace-events @@ -28,3 +28,11 @@ kvm_handle_epr(void) "handle epr" kvm_handle_watchdog_expiry(void) "handle watchdog expiry" kvm_handle_debug_exception(void) "handle debug exception" kvm_handle_nmi_exception(void) "handle NMI exception" + +# excp_helper.c +ppc_excp_rfi(uint64_t nip, uint64_t msr) "Return from exception at 0x%" PRIx64 " with flags 0x%016" PRIx64 +ppc_excp_dsi(uint64_t dsisr, uint64_t dar) "DSI exception: DSISR=0x%" PRIx64 " DAR=0x%" PRIx64 +ppc_excp_isi(uint64_t msr, uint64_t nip) "ISI exception: msr=0x%016" PRIx64 " nip=0x%" PRIx64 +ppc_excp_fp_ignore(void) "Ignore floating point exception" +ppc_excp_inval(uint64_t nip) "Invalid instruction at 0x%" PRIx64 +ppc_excp_print(const char *excp) "%s exception" diff --git a/target/ppc/translate.c b/target/ppc/translate.c 
index 5d8b06bd80..9960df6e18 100644 --- a/target/ppc/translate.c +++ b/target/ppc/translate.c @@ -42,7 +42,6 @@ #define CPU_SINGLE_STEP 0x1 #define CPU_BRANCH_STEP 0x2 -#define GDBSTUB_SINGLE_STEP 0x4 /* Include definitions for instructions classes and implementations flags */ /* #define PPC_DEBUG_DISAS */ @@ -175,6 +174,9 @@ struct DisasContext { bool spe_enabled; bool tm_enabled; bool gtse; + bool hr; + bool mmcr0_pmcc0; + bool mmcr0_pmcc1; ppc_spr_t *spr_cb; /* Needed to check rights for mfspr/mtspr */ int singlestep_enabled; uint32_t flags; @@ -332,7 +334,7 @@ static uint32_t gen_prep_dbgex(DisasContext *ctx) static void gen_debug_exception(DisasContext *ctx) { - gen_helper_raise_exception(cpu_env, tcg_constant_i32(EXCP_DEBUG)); + gen_helper_raise_exception(cpu_env, tcg_constant_i32(gen_prep_dbgex(ctx))); ctx->base.is_jmp = DISAS_NORETURN; } @@ -3195,6 +3197,20 @@ static inline void gen_align_no_le(DisasContext *ctx) (ctx->opcode & 0x03FF0000) | POWERPC_EXCP_ALIGN_LE); } +static TCGv do_ea_calc(DisasContext *ctx, int ra, TCGv displ) +{ + TCGv ea = tcg_temp_new(); + if (ra) { + tcg_gen_add_tl(ea, cpu_gpr[ra], displ); + } else { + tcg_gen_mov_tl(ea, displ); + } + if (NARROW_MODE(ctx)) { + tcg_gen_ext32u_tl(ea, ea); + } + return ea; +} + /*** Integer load ***/ #define DEF_MEMOP(op) ((op) | ctx->default_tcg_memop_mask) #define BSWAP_MEMOP(op) ((op) | (ctx->default_tcg_memop_mask ^ MO_BSWAP)) @@ -3311,69 +3327,6 @@ GEN_LDX_HVRM(ldcix, ld64_i64, 0x15, 0x1b, PPC_CILDST) GEN_LDX_HVRM(lwzcix, ld32u, 0x15, 0x15, PPC_CILDST) GEN_LDX_HVRM(lhzcix, ld16u, 0x15, 0x19, PPC_CILDST) GEN_LDX_HVRM(lbzcix, ld8u, 0x15, 0x1a, PPC_CILDST) - -/* lq */ -static void gen_lq(DisasContext *ctx) -{ - int ra, rd; - TCGv EA, hi, lo; - - /* lq is a legal user mode instruction starting in ISA 2.07 */ - bool legal_in_user_mode = (ctx->insns_flags2 & PPC2_LSQ_ISA207) != 0; - bool le_is_supported = (ctx->insns_flags2 & PPC2_LSQ_ISA207) != 0; - - if (!legal_in_user_mode && ctx->pr) { - gen_priv_exception(ctx, POWERPC_EXCP_PRIV_OPC); - return; - } - - if (!le_is_supported && ctx->le_mode) { - gen_align_no_le(ctx); - return; - } - ra = rA(ctx->opcode); - rd = rD(ctx->opcode); - if (unlikely((rd & 1) || rd == ra)) { - gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); - return; - } - - gen_set_access_type(ctx, ACCESS_INT); - EA = tcg_temp_new(); - gen_addr_imm_index(ctx, EA, 0x0F); - - /* Note that the low part is always in RD+1, even in LE mode. */ - lo = cpu_gpr[rd + 1]; - hi = cpu_gpr[rd]; - - if (tb_cflags(ctx->base.tb) & CF_PARALLEL) { - if (HAVE_ATOMIC128) { - TCGv_i32 oi = tcg_temp_new_i32(); - if (ctx->le_mode) { - tcg_gen_movi_i32(oi, make_memop_idx(MO_LEQ, ctx->mem_idx)); - gen_helper_lq_le_parallel(lo, cpu_env, EA, oi); - } else { - tcg_gen_movi_i32(oi, make_memop_idx(MO_BEQ, ctx->mem_idx)); - gen_helper_lq_be_parallel(lo, cpu_env, EA, oi); - } - tcg_temp_free_i32(oi); - tcg_gen_ld_i64(hi, cpu_env, offsetof(CPUPPCState, retxh)); - } else { - /* Restart with exclusive lock. 
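
Aside (illustrative only, not part of the patch): the effective-address rule that the new do_ea_calc() helper above implements with TCG temporaries can be written as ordinary C. ea_calc() and the gpr array below are made-up names for this sketch; the real helper emits tcg_gen_add_tl/tcg_gen_ext32u_tl on TCGv values instead.

#include <stdint.h>
#include <stdio.h>

/* EA = (RA|0) + displacement; in 32-bit ("narrow") mode the result wraps to 32 bits. */
static uint64_t ea_calc(const uint64_t gpr[32], int ra, int64_t displ, int narrow)
{
    uint64_t ea = (ra ? gpr[ra] : 0) + (uint64_t)displ;
    return narrow ? (uint32_t)ea : ea;
}

int main(void)
{
    uint64_t gpr[32] = { 0 };
    gpr[3] = 0xfffffff8ull;
    /* r3 + 16 crosses 2^32: the narrow-mode result wraps, the 64-bit one does not. */
    printf("64-bit: 0x%llx\n", (unsigned long long)ea_calc(gpr, 3, 16, 0));
    printf("narrow: 0x%llx\n", (unsigned long long)ea_calc(gpr, 3, 16, 1));
    return 0;
}
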
*/ - gen_helper_exit_atomic(cpu_env); - ctx->base.is_jmp = DISAS_NORETURN; - } - } else if (ctx->le_mode) { - tcg_gen_qemu_ld_i64(lo, EA, ctx->mem_idx, MO_LEQ); - gen_addr_add(ctx, EA, EA, 8); - tcg_gen_qemu_ld_i64(hi, EA, ctx->mem_idx, MO_LEQ); - } else { - tcg_gen_qemu_ld_i64(hi, EA, ctx->mem_idx, MO_BEQ); - gen_addr_add(ctx, EA, EA, 8); - tcg_gen_qemu_ld_i64(lo, EA, ctx->mem_idx, MO_BEQ); - } - tcg_temp_free(EA); -} #endif /*** Integer store ***/ @@ -3419,88 +3372,6 @@ GEN_STX_HVRM(stdcix, st64_i64, 0x15, 0x1f, PPC_CILDST) GEN_STX_HVRM(stwcix, st32, 0x15, 0x1c, PPC_CILDST) GEN_STX_HVRM(sthcix, st16, 0x15, 0x1d, PPC_CILDST) GEN_STX_HVRM(stbcix, st8, 0x15, 0x1e, PPC_CILDST) - -static void gen_std(DisasContext *ctx) -{ - int rs; - TCGv EA; - - rs = rS(ctx->opcode); - if ((ctx->opcode & 0x3) == 0x2) { /* stq */ - bool legal_in_user_mode = (ctx->insns_flags2 & PPC2_LSQ_ISA207) != 0; - bool le_is_supported = (ctx->insns_flags2 & PPC2_LSQ_ISA207) != 0; - TCGv hi, lo; - - if (!(ctx->insns_flags & PPC_64BX)) { - gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); - } - - if (!legal_in_user_mode && ctx->pr) { - gen_priv_exception(ctx, POWERPC_EXCP_PRIV_OPC); - return; - } - - if (!le_is_supported && ctx->le_mode) { - gen_align_no_le(ctx); - return; - } - - if (unlikely(rs & 1)) { - gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); - return; - } - gen_set_access_type(ctx, ACCESS_INT); - EA = tcg_temp_new(); - gen_addr_imm_index(ctx, EA, 0x03); - - /* Note that the low part is always in RS+1, even in LE mode. */ - lo = cpu_gpr[rs + 1]; - hi = cpu_gpr[rs]; - - if (tb_cflags(ctx->base.tb) & CF_PARALLEL) { - if (HAVE_ATOMIC128) { - TCGv_i32 oi = tcg_temp_new_i32(); - if (ctx->le_mode) { - tcg_gen_movi_i32(oi, make_memop_idx(MO_LEQ, ctx->mem_idx)); - gen_helper_stq_le_parallel(cpu_env, EA, lo, hi, oi); - } else { - tcg_gen_movi_i32(oi, make_memop_idx(MO_BEQ, ctx->mem_idx)); - gen_helper_stq_be_parallel(cpu_env, EA, lo, hi, oi); - } - tcg_temp_free_i32(oi); - } else { - /* Restart with exclusive lock. 
*/ - gen_helper_exit_atomic(cpu_env); - ctx->base.is_jmp = DISAS_NORETURN; - } - } else if (ctx->le_mode) { - tcg_gen_qemu_st_i64(lo, EA, ctx->mem_idx, MO_LEQ); - gen_addr_add(ctx, EA, EA, 8); - tcg_gen_qemu_st_i64(hi, EA, ctx->mem_idx, MO_LEQ); - } else { - tcg_gen_qemu_st_i64(hi, EA, ctx->mem_idx, MO_BEQ); - gen_addr_add(ctx, EA, EA, 8); - tcg_gen_qemu_st_i64(lo, EA, ctx->mem_idx, MO_BEQ); - } - tcg_temp_free(EA); - } else { - /* std / stdu */ - if (Rc(ctx->opcode)) { - if (unlikely(rA(ctx->opcode) == 0)) { - gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); - return; - } - } - gen_set_access_type(ctx, ACCESS_INT); - EA = tcg_temp_new(); - gen_addr_imm_index(ctx, EA, 0x03); - gen_qemu_st64_i64(ctx, cpu_gpr[rs], EA); - if (Rc(ctx->opcode)) { - tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA); - } - tcg_temp_free(EA); - } -} #endif /*** Integer load and store with byte reverse ***/ @@ -4066,11 +3937,11 @@ static void gen_lqarx(DisasContext *ctx) if (HAVE_ATOMIC128) { TCGv_i32 oi = tcg_temp_new_i32(); if (ctx->le_mode) { - tcg_gen_movi_i32(oi, make_memop_idx(MO_LEQ | MO_ALIGN_16, + tcg_gen_movi_i32(oi, make_memop_idx(MO_LE | MO_128 | MO_ALIGN, ctx->mem_idx)); gen_helper_lq_le_parallel(lo, cpu_env, EA, oi); } else { - tcg_gen_movi_i32(oi, make_memop_idx(MO_BEQ | MO_ALIGN_16, + tcg_gen_movi_i32(oi, make_memop_idx(MO_BE | MO_128 | MO_ALIGN, ctx->mem_idx)); gen_helper_lq_be_parallel(lo, cpu_env, EA, oi); } @@ -4121,7 +3992,7 @@ static void gen_stqcx_(DisasContext *ctx) if (tb_cflags(ctx->base.tb) & CF_PARALLEL) { if (HAVE_CMPXCHG128) { - TCGv_i32 oi = tcg_const_i32(DEF_MEMOP(MO_Q) | MO_ALIGN_16); + TCGv_i32 oi = tcg_const_i32(DEF_MEMOP(MO_128) | MO_ALIGN); if (ctx->le_mode) { gen_helper_stqcx_le_parallel(cpu_crf[0], cpu_env, EA, lo, hi, oi); @@ -4306,15 +4177,8 @@ static inline bool use_goto_tb(DisasContext *ctx, target_ulong dest) static void gen_lookup_and_goto_ptr(DisasContext *ctx) { - int sse = ctx->singlestep_enabled; - if (unlikely(sse)) { - if (sse & GDBSTUB_SINGLE_STEP) { - gen_debug_exception(ctx); - } else if (sse & (CPU_SINGLE_STEP | CPU_BRANCH_STEP)) { - gen_helper_raise_exception(cpu_env, tcg_constant_i32(gen_prep_dbgex(ctx))); - } else { - tcg_gen_exit_tb(NULL, 0); - } + if (unlikely(ctx->singlestep_enabled)) { + gen_debug_exception(ctx); } else { tcg_gen_lookup_and_goto_ptr(); } @@ -4939,32 +4803,40 @@ static void gen_mtmsrd(DisasContext *ctx) CHK_SV; #if !defined(CONFIG_USER_ONLY) + TCGv t0, t1; + target_ulong mask; + + t0 = tcg_temp_new(); + t1 = tcg_temp_new(); + gen_icount_io_start(ctx); + if (ctx->opcode & 0x00010000) { /* L=1 form only updates EE and RI */ - TCGv t0 = tcg_temp_new(); - TCGv t1 = tcg_temp_new(); - tcg_gen_andi_tl(t0, cpu_gpr[rS(ctx->opcode)], - (1 << MSR_RI) | (1 << MSR_EE)); - tcg_gen_andi_tl(t1, cpu_msr, - ~(target_ulong)((1 << MSR_RI) | (1 << MSR_EE))); - tcg_gen_or_tl(t1, t1, t0); - - gen_helper_store_msr(cpu_env, t1); - tcg_temp_free(t0); - tcg_temp_free(t1); - + mask = (1ULL << MSR_RI) | (1ULL << MSR_EE); } else { + /* mtmsrd does not alter HV, S, ME, or LE */ + mask = ~((1ULL << MSR_LE) | (1ULL << MSR_ME) | (1ULL << MSR_S) | + (1ULL << MSR_HV)); /* * XXX: we need to update nip before the store if we enter * power saving mode, we will exit the loop directly from * ppc_store_msr */ gen_update_nip(ctx, ctx->base.pc_next); - gen_helper_store_msr(cpu_env, cpu_gpr[rS(ctx->opcode)]); } + + tcg_gen_andi_tl(t0, cpu_gpr[rS(ctx->opcode)], mask); + tcg_gen_andi_tl(t1, cpu_msr, ~mask); + tcg_gen_or_tl(t0, t0, t1); + + gen_helper_store_msr(cpu_env, t0); + /* Must 
stop the translation as machine state (may have) changed */ ctx->base.is_jmp = DISAS_EXIT_UPDATE; + + tcg_temp_free(t0); + tcg_temp_free(t1); #endif /* !defined(CONFIG_USER_ONLY) */ } #endif /* defined(TARGET_PPC64) */ @@ -4974,23 +4846,19 @@ static void gen_mtmsr(DisasContext *ctx) CHK_SV; #if !defined(CONFIG_USER_ONLY) + TCGv t0, t1; + target_ulong mask = 0xFFFFFFFF; + + t0 = tcg_temp_new(); + t1 = tcg_temp_new(); + gen_icount_io_start(ctx); if (ctx->opcode & 0x00010000) { /* L=1 form only updates EE and RI */ - TCGv t0 = tcg_temp_new(); - TCGv t1 = tcg_temp_new(); - tcg_gen_andi_tl(t0, cpu_gpr[rS(ctx->opcode)], - (1 << MSR_RI) | (1 << MSR_EE)); - tcg_gen_andi_tl(t1, cpu_msr, - ~(target_ulong)((1 << MSR_RI) | (1 << MSR_EE))); - tcg_gen_or_tl(t1, t1, t0); - - gen_helper_store_msr(cpu_env, t1); - tcg_temp_free(t0); - tcg_temp_free(t1); - + mask &= (1ULL << MSR_RI) | (1ULL << MSR_EE); } else { - TCGv msr = tcg_temp_new(); + /* mtmsr does not alter S, ME, or LE */ + mask &= ~((1ULL << MSR_LE) | (1ULL << MSR_ME) | (1ULL << MSR_S)); /* * XXX: we need to update nip before the store if we enter @@ -4998,16 +4866,19 @@ static void gen_mtmsr(DisasContext *ctx) * ppc_store_msr */ gen_update_nip(ctx, ctx->base.pc_next); -#if defined(TARGET_PPC64) - tcg_gen_deposit_tl(msr, cpu_msr, cpu_gpr[rS(ctx->opcode)], 0, 32); -#else - tcg_gen_mov_tl(msr, cpu_gpr[rS(ctx->opcode)]); -#endif - gen_helper_store_msr(cpu_env, msr); - tcg_temp_free(msr); } + + tcg_gen_andi_tl(t0, cpu_gpr[rS(ctx->opcode)], mask); + tcg_gen_andi_tl(t1, cpu_msr, ~mask); + tcg_gen_or_tl(t0, t0, t1); + + gen_helper_store_msr(cpu_env, t0); + /* Must stop the translation as machine state (may have) changed */ ctx->base.is_jmp = DISAS_EXIT_UPDATE; + + tcg_temp_free(t0); + tcg_temp_free(t1); #endif } @@ -5073,19 +4944,15 @@ static void gen_mtspr(DisasContext *ctx) static void gen_setb(DisasContext *ctx) { TCGv_i32 t0 = tcg_temp_new_i32(); - TCGv_i32 t8 = tcg_temp_new_i32(); - TCGv_i32 tm1 = tcg_temp_new_i32(); + TCGv_i32 t8 = tcg_constant_i32(8); + TCGv_i32 tm1 = tcg_constant_i32(-1); int crf = crfS(ctx->opcode); tcg_gen_setcondi_i32(TCG_COND_GEU, t0, cpu_crf[crf], 4); - tcg_gen_movi_i32(t8, 8); - tcg_gen_movi_i32(tm1, -1); tcg_gen_movcond_i32(TCG_COND_GEU, t0, cpu_crf[crf], t8, tm1, t0); tcg_gen_ext_i32_tl(cpu_gpr[rD(ctx->opcode)], t0); tcg_temp_free_i32(t0); - tcg_temp_free_i32(t8); - tcg_temp_free_i32(tm1); } #endif @@ -5516,7 +5383,15 @@ static void gen_tlbiel(DisasContext *ctx) #if defined(CONFIG_USER_ONLY) GEN_PRIV; #else - CHK_SV; + bool psr = (ctx->opcode >> 17) & 0x1; + + if (ctx->pr || (!ctx->hv && !psr && ctx->hr)) { + /* + * tlbiel is privileged except when PSR=0 and HR=1, making it + * hypervisor privileged. + */ + GEN_PRIV; + } gen_helper_tlbie(cpu_env, cpu_gpr[rB(ctx->opcode)]); #endif /* defined(CONFIG_USER_ONLY) */ @@ -5528,12 +5403,20 @@ static void gen_tlbie(DisasContext *ctx) #if defined(CONFIG_USER_ONLY) GEN_PRIV; #else + bool psr = (ctx->opcode >> 17) & 0x1; TCGv_i32 t1; - if (ctx->gtse) { - CHK_SV; /* If gtse is set then tlbie is supervisor privileged */ - } else { - CHK_HV; /* Else hypervisor privileged */ + if (ctx->pr) { + /* tlbie is privileged... */ + GEN_PRIV; + } else if (!ctx->hv) { + if (!ctx->gtse || (!psr && ctx->hr)) { + /* + * ... except when GTSE=0 or when PSR=0 and HR=1, making it + * hypervisor privileged. 
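
Aside (illustrative only, not part of the patch): both mtmsrd/mtmsr paths above now reduce to the same read-modify-write, msr = (gpr & mask) | (msr & ~mask), with only the mask differing between the L=0 and L=1 forms. A plain-C sketch of that update; msr_insert() is a hypothetical name and the bit positions in main() are only illustrative.

#include <stdint.h>
#include <stdio.h>

/* Insert the mask-selected bits of 'src' into 'msr', leaving all other bits untouched. */
static uint64_t msr_insert(uint64_t msr, uint64_t src, uint64_t mask)
{
    return (src & mask) | (msr & ~mask);
}

int main(void)
{
    /* L=1 form: only EE and RI may change (bit numbers here are illustrative). */
    uint64_t ee_ri_mask = (1ull << 15) | (1ull << 1);
    uint64_t msr = 0x8000000000001000ull;
    uint64_t gpr = 0xffffffffffffffffull;
    printf("0x%016llx\n", (unsigned long long)msr_insert(msr, gpr, ee_ri_mask));
    return 0;
}
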
+ */ + GEN_PRIV; + } } if (NARROW_MODE(ctx)) { @@ -7422,11 +7305,21 @@ static inline void set_avr64(int regno, TCGv_i64 src, bool high) /* * Helpers for decodetree used by !function for decoding arguments. */ +static int times_2(DisasContext *ctx, int x) +{ + return x * 2; +} + static int times_4(DisasContext *ctx, int x) { return x * 4; } +static int times_16(DisasContext *ctx, int x) +{ + return x * 16; +} + /* * Helpers for trans_* functions to check for specific insns flags. * Use token pasting to ensure that we use the proper flag with the @@ -7453,6 +7346,30 @@ static int times_4(DisasContext *ctx, int x) # define REQUIRE_64BIT(CTX) REQUIRE_INSNS_FLAGS(CTX, 64B) #endif +#define REQUIRE_VECTOR(CTX) \ + do { \ + if (unlikely(!(CTX)->altivec_enabled)) { \ + gen_exception((CTX), POWERPC_EXCP_VPU); \ + return true; \ + } \ + } while (0) + +#define REQUIRE_VSX(CTX) \ + do { \ + if (unlikely(!(CTX)->vsx_enabled)) { \ + gen_exception((CTX), POWERPC_EXCP_VSXU); \ + return true; \ + } \ + } while (0) + +#define REQUIRE_FPU(ctx) \ + do { \ + if (unlikely(!(ctx)->fpu_enabled)) { \ + gen_exception((ctx), POWERPC_EXCP_FPU); \ + return true; \ + } \ + } while (0) + /* * Helpers for implementing sets of trans_* functions. * Defer the implementation of NAME to FUNC, with optional extra arguments. @@ -7470,6 +7387,27 @@ static int times_4(DisasContext *ctx, int x) #include "decode-insn32.c.inc" #include "decode-insn64.c.inc" +#include "power8-pmu-regs.c.inc" + +/* + * Incorporate CIA into the constant when R=1. + * Validate that when R=1, RA=0. + */ +static bool resolve_PLS_D(DisasContext *ctx, arg_D *d, arg_PLS_D *a) +{ + d->rt = a->rt; + d->ra = a->ra; + d->si = a->si; + if (a->r) { + if (unlikely(a->ra != 0)) { + gen_invalid(ctx); + return false; + } + d->si += ctx->cia; + } + return true; +} + #include "translate/fixedpoint-impl.c.inc" #include "translate/fp-impl.c.inc" @@ -7477,7 +7415,6 @@ static int times_4(DisasContext *ctx, int x) #include "translate/vmx-impl.c.inc" #include "translate/vsx-impl.c.inc" -#include "translate/vector-impl.c.inc" #include "translate/dfp-impl.c.inc" @@ -7509,20 +7446,7 @@ static void gen_dform39(DisasContext *ctx) /* handles stfdp, lxv, stxsd, stxssp lxvx */ static void gen_dform3D(DisasContext *ctx) { - if ((ctx->opcode & 3) == 1) { /* DQ-FORM */ - switch (ctx->opcode & 0x7) { - case 1: /* lxv */ - if (ctx->insns_flags2 & PPC2_ISA300) { - return gen_lxv(ctx); - } - break; - case 5: /* stxv */ - if (ctx->insns_flags2 & PPC2_ISA300) { - return gen_stxv(ctx); - } - break; - } - } else { /* DS-FORM */ + if ((ctx->opcode & 3) != 1) { /* DS-FORM */ switch (ctx->opcode & 0x3) { case 0: /* stfdp */ if (ctx->insns_flags2 & PPC2_ISA205) { @@ -7562,18 +7486,16 @@ static void gen_brw(DisasContext *ctx) /* brh */ static void gen_brh(DisasContext *ctx) { - TCGv_i64 t0 = tcg_temp_new_i64(); + TCGv_i64 mask = tcg_constant_i64(0x00ff00ff00ff00ffull); TCGv_i64 t1 = tcg_temp_new_i64(); TCGv_i64 t2 = tcg_temp_new_i64(); - tcg_gen_movi_i64(t0, 0x00ff00ff00ff00ffull); tcg_gen_shri_i64(t1, cpu_gpr[rS(ctx->opcode)], 8); - tcg_gen_and_i64(t2, t1, t0); - tcg_gen_and_i64(t1, cpu_gpr[rS(ctx->opcode)], t0); + tcg_gen_and_i64(t2, t1, mask); + tcg_gen_and_i64(t1, cpu_gpr[rS(ctx->opcode)], mask); tcg_gen_shli_i64(t1, t1, 8); tcg_gen_or_i64(cpu_gpr[rA(ctx->opcode)], t1, t2); - tcg_temp_free_i64(t0); tcg_temp_free_i64(t1); tcg_temp_free_i64(t2); } @@ -7647,13 +7569,9 @@ GEN_HANDLER2_E(extswsli0, "extswsli", 0x1F, 0x1A, 0x1B, 0x00000000, GEN_HANDLER2_E(extswsli1, "extswsli", 0x1F, 0x1B, 0x1B, 
0x00000000, PPC_NONE, PPC2_ISA300), #endif -#if defined(TARGET_PPC64) -GEN_HANDLER(lq, 0x38, 0xFF, 0xFF, 0x00000000, PPC_64BX), -GEN_HANDLER(std, 0x3E, 0xFF, 0xFF, 0x00000000, PPC_64B), -#endif /* handles lfdp, lxsd, lxssp */ GEN_HANDLER_E(dform39, 0x39, 0xFF, 0xFF, 0x00000000, PPC_NONE, PPC2_ISA205), -/* handles stfdp, lxv, stxsd, stxssp, stxv */ +/* handles stfdp, stxsd, stxssp */ GEN_HANDLER_E(dform3D, 0x3D, 0xFF, 0xFF, 0x00000000, PPC_NONE, PPC2_ISA205), GEN_HANDLER(lmw, 0x2E, 0xFF, 0xFF, 0x00000000, PPC_INTEGER), GEN_HANDLER(stmw, 0x2F, 0xFF, 0xFF, 0x00000000, PPC_INTEGER), @@ -8155,8 +8073,6 @@ GEN_HANDLER2_E(trechkpt, "trechkpt", 0x1F, 0x0E, 0x1F, 0x03FFF800, \ #include "translate/vsx-ops.c.inc" -#include "translate/dfp-ops.c.inc" - #include "translate/spe-ops.c.inc" }; @@ -8539,21 +8455,18 @@ static void ppc_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs) ctx->vsx_enabled = (hflags >> HFLAGS_VSX) & 1; ctx->tm_enabled = (hflags >> HFLAGS_TM) & 1; ctx->gtse = (hflags >> HFLAGS_GTSE) & 1; + ctx->hr = (hflags >> HFLAGS_HR) & 1; + ctx->mmcr0_pmcc0 = (hflags >> HFLAGS_PMCC0) & 1; + ctx->mmcr0_pmcc1 = (hflags >> HFLAGS_PMCC1) & 1; ctx->singlestep_enabled = 0; if ((hflags >> HFLAGS_SE) & 1) { ctx->singlestep_enabled |= CPU_SINGLE_STEP; + ctx->base.max_insns = 1; } if ((hflags >> HFLAGS_BE) & 1) { ctx->singlestep_enabled |= CPU_BRANCH_STEP; } - if (unlikely(ctx->base.singlestep_enabled)) { - ctx->singlestep_enabled |= GDBSTUB_SINGLE_STEP; - } - - if (ctx->singlestep_enabled & (CPU_SINGLE_STEP | GDBSTUB_SINGLE_STEP)) { - ctx->base.max_insns = 1; - } } static void ppc_tr_tb_start(DisasContextBase *db, CPUState *cs) @@ -8622,7 +8535,6 @@ static void ppc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs) DisasContext *ctx = container_of(dcbase, DisasContext, base); DisasJumpType is_jmp = ctx->base.is_jmp; target_ulong nip = ctx->base.pc_next; - int sse; if (is_jmp == DISAS_NORETURN) { /* We have already exited the TB. */ @@ -8630,8 +8542,8 @@ static void ppc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs) } /* Honor single stepping. */ - sse = ctx->singlestep_enabled & (CPU_SINGLE_STEP | GDBSTUB_SINGLE_STEP); - if (unlikely(sse)) { + if (unlikely(ctx->singlestep_enabled & CPU_SINGLE_STEP) + && (nip <= 0x100 || nip > 0xf00)) { switch (is_jmp) { case DISAS_TOO_MANY: case DISAS_EXIT_UPDATE: @@ -8645,15 +8557,8 @@ static void ppc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs) g_assert_not_reached(); } - if (sse & GDBSTUB_SINGLE_STEP) { - gen_debug_exception(ctx); - return; - } - /* else CPU_SINGLE_STEP... 
*/ - if (nip <= 0x100 || nip > 0xf00) { - gen_helper_raise_exception(cpu_env, tcg_constant_i32(gen_prep_dbgex(ctx))); - return; - } + gen_debug_exception(ctx); + return; } switch (is_jmp) { diff --git a/target/ppc/translate/dfp-impl.c.inc b/target/ppc/translate/dfp-impl.c.inc index 6c556dc2e1..f9f1d58d44 100644 --- a/target/ppc/translate/dfp-impl.c.inc +++ b/target/ppc/translate/dfp-impl.c.inc @@ -7,226 +7,223 @@ static inline TCGv_ptr gen_fprp_ptr(int reg) return r; } -#define GEN_DFP_T_A_B_Rc(name) \ -static void gen_##name(DisasContext *ctx) \ -{ \ - TCGv_ptr rd, ra, rb; \ - if (unlikely(!ctx->fpu_enabled)) { \ - gen_exception(ctx, POWERPC_EXCP_FPU); \ - return; \ - } \ - gen_update_nip(ctx, ctx->base.pc_next - 4); \ - rd = gen_fprp_ptr(rD(ctx->opcode)); \ - ra = gen_fprp_ptr(rA(ctx->opcode)); \ - rb = gen_fprp_ptr(rB(ctx->opcode)); \ - gen_helper_##name(cpu_env, rd, ra, rb); \ - if (unlikely(Rc(ctx->opcode) != 0)) { \ - gen_set_cr1_from_fpscr(ctx); \ - } \ - tcg_temp_free_ptr(rd); \ - tcg_temp_free_ptr(ra); \ - tcg_temp_free_ptr(rb); \ +#define TRANS_DFP_T_A_B_Rc(NAME) \ +static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \ +{ \ + TCGv_ptr rt, ra, rb; \ + REQUIRE_INSNS_FLAGS2(ctx, DFP); \ + REQUIRE_FPU(ctx); \ + rt = gen_fprp_ptr(a->rt); \ + ra = gen_fprp_ptr(a->ra); \ + rb = gen_fprp_ptr(a->rb); \ + gen_helper_##NAME(cpu_env, rt, ra, rb); \ + if (unlikely(a->rc)) { \ + gen_set_cr1_from_fpscr(ctx); \ + } \ + tcg_temp_free_ptr(rt); \ + tcg_temp_free_ptr(ra); \ + tcg_temp_free_ptr(rb); \ + return true; \ } -#define GEN_DFP_BF_A_B(name) \ -static void gen_##name(DisasContext *ctx) \ -{ \ - TCGv_ptr ra, rb; \ - if (unlikely(!ctx->fpu_enabled)) { \ - gen_exception(ctx, POWERPC_EXCP_FPU); \ - return; \ - } \ - gen_update_nip(ctx, ctx->base.pc_next - 4); \ - ra = gen_fprp_ptr(rA(ctx->opcode)); \ - rb = gen_fprp_ptr(rB(ctx->opcode)); \ - gen_helper_##name(cpu_crf[crfD(ctx->opcode)], \ - cpu_env, ra, rb); \ - tcg_temp_free_ptr(ra); \ - tcg_temp_free_ptr(rb); \ +#define TRANS_DFP_BF_A_B(NAME) \ +static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \ +{ \ + TCGv_ptr ra, rb; \ + REQUIRE_INSNS_FLAGS2(ctx, DFP); \ + REQUIRE_FPU(ctx); \ + ra = gen_fprp_ptr(a->ra); \ + rb = gen_fprp_ptr(a->rb); \ + gen_helper_##NAME(cpu_crf[a->bf], \ + cpu_env, ra, rb); \ + tcg_temp_free_ptr(ra); \ + tcg_temp_free_ptr(rb); \ + return true; \ } -#define GEN_DFP_BF_I_B(name) \ -static void gen_##name(DisasContext *ctx) \ -{ \ - TCGv_i32 uim; \ - TCGv_ptr rb; \ - if (unlikely(!ctx->fpu_enabled)) { \ - gen_exception(ctx, POWERPC_EXCP_FPU); \ - return; \ - } \ - gen_update_nip(ctx, ctx->base.pc_next - 4); \ - uim = tcg_const_i32(UIMM5(ctx->opcode)); \ - rb = gen_fprp_ptr(rB(ctx->opcode)); \ - gen_helper_##name(cpu_crf[crfD(ctx->opcode)], \ - cpu_env, uim, rb); \ - tcg_temp_free_i32(uim); \ - tcg_temp_free_ptr(rb); \ +#define TRANS_DFP_BF_I_B(NAME) \ +static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \ +{ \ + TCGv_ptr rb; \ + REQUIRE_INSNS_FLAGS2(ctx, DFP); \ + REQUIRE_FPU(ctx); \ + rb = gen_fprp_ptr(a->rb); \ + gen_helper_##NAME(cpu_crf[a->bf], \ + cpu_env, tcg_constant_i32(a->uim), rb);\ + tcg_temp_free_ptr(rb); \ + return true; \ } -#define GEN_DFP_BF_A_DCM(name) \ -static void gen_##name(DisasContext *ctx) \ -{ \ - TCGv_ptr ra; \ - TCGv_i32 dcm; \ - if (unlikely(!ctx->fpu_enabled)) { \ - gen_exception(ctx, POWERPC_EXCP_FPU); \ - return; \ - } \ - gen_update_nip(ctx, ctx->base.pc_next - 4); \ - ra = gen_fprp_ptr(rA(ctx->opcode)); \ - dcm = tcg_const_i32(DCM(ctx->opcode)); \ - 
gen_helper_##name(cpu_crf[crfD(ctx->opcode)], \ - cpu_env, ra, dcm); \ - tcg_temp_free_ptr(ra); \ - tcg_temp_free_i32(dcm); \ +#define TRANS_DFP_BF_A_DCM(NAME) \ +static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \ +{ \ + TCGv_ptr ra; \ + REQUIRE_INSNS_FLAGS2(ctx, DFP); \ + REQUIRE_FPU(ctx); \ + ra = gen_fprp_ptr(a->fra); \ + gen_helper_##NAME(cpu_crf[a->bf], \ + cpu_env, ra, tcg_constant_i32(a->dm)); \ + tcg_temp_free_ptr(ra); \ + return true; \ } -#define GEN_DFP_T_B_U32_U32_Rc(name, u32f1, u32f2) \ -static void gen_##name(DisasContext *ctx) \ -{ \ - TCGv_ptr rt, rb; \ - TCGv_i32 u32_1, u32_2; \ - if (unlikely(!ctx->fpu_enabled)) { \ - gen_exception(ctx, POWERPC_EXCP_FPU); \ - return; \ - } \ - gen_update_nip(ctx, ctx->base.pc_next - 4); \ - rt = gen_fprp_ptr(rD(ctx->opcode)); \ - rb = gen_fprp_ptr(rB(ctx->opcode)); \ - u32_1 = tcg_const_i32(u32f1(ctx->opcode)); \ - u32_2 = tcg_const_i32(u32f2(ctx->opcode)); \ - gen_helper_##name(cpu_env, rt, rb, u32_1, u32_2); \ - if (unlikely(Rc(ctx->opcode) != 0)) { \ - gen_set_cr1_from_fpscr(ctx); \ - } \ - tcg_temp_free_ptr(rt); \ - tcg_temp_free_ptr(rb); \ - tcg_temp_free_i32(u32_1); \ - tcg_temp_free_i32(u32_2); \ +#define TRANS_DFP_T_B_U32_U32_Rc(NAME, U32F1, U32F2) \ +static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \ +{ \ + TCGv_ptr rt, rb; \ + REQUIRE_INSNS_FLAGS2(ctx, DFP); \ + REQUIRE_FPU(ctx); \ + rt = gen_fprp_ptr(a->frt); \ + rb = gen_fprp_ptr(a->frb); \ + gen_helper_##NAME(cpu_env, rt, rb, \ + tcg_constant_i32(a->U32F1), \ + tcg_constant_i32(a->U32F2)); \ + if (unlikely(a->rc)) { \ + gen_set_cr1_from_fpscr(ctx); \ + } \ + tcg_temp_free_ptr(rt); \ + tcg_temp_free_ptr(rb); \ + return true; \ } -#define GEN_DFP_T_A_B_I32_Rc(name, i32fld) \ -static void gen_##name(DisasContext *ctx) \ -{ \ - TCGv_ptr rt, ra, rb; \ - TCGv_i32 i32; \ - if (unlikely(!ctx->fpu_enabled)) { \ - gen_exception(ctx, POWERPC_EXCP_FPU); \ - return; \ - } \ - gen_update_nip(ctx, ctx->base.pc_next - 4); \ - rt = gen_fprp_ptr(rD(ctx->opcode)); \ - ra = gen_fprp_ptr(rA(ctx->opcode)); \ - rb = gen_fprp_ptr(rB(ctx->opcode)); \ - i32 = tcg_const_i32(i32fld(ctx->opcode)); \ - gen_helper_##name(cpu_env, rt, ra, rb, i32); \ - if (unlikely(Rc(ctx->opcode) != 0)) { \ - gen_set_cr1_from_fpscr(ctx); \ - } \ - tcg_temp_free_ptr(rt); \ - tcg_temp_free_ptr(rb); \ - tcg_temp_free_ptr(ra); \ - tcg_temp_free_i32(i32); \ - } - -#define GEN_DFP_T_B_Rc(name) \ -static void gen_##name(DisasContext *ctx) \ -{ \ - TCGv_ptr rt, rb; \ - if (unlikely(!ctx->fpu_enabled)) { \ - gen_exception(ctx, POWERPC_EXCP_FPU); \ - return; \ - } \ - gen_update_nip(ctx, ctx->base.pc_next - 4); \ - rt = gen_fprp_ptr(rD(ctx->opcode)); \ - rb = gen_fprp_ptr(rB(ctx->opcode)); \ - gen_helper_##name(cpu_env, rt, rb); \ - if (unlikely(Rc(ctx->opcode) != 0)) { \ - gen_set_cr1_from_fpscr(ctx); \ - } \ - tcg_temp_free_ptr(rt); \ - tcg_temp_free_ptr(rb); \ - } - -#define GEN_DFP_T_FPR_I32_Rc(name, fprfld, i32fld) \ -static void gen_##name(DisasContext *ctx) \ -{ \ - TCGv_ptr rt, rs; \ - TCGv_i32 i32; \ - if (unlikely(!ctx->fpu_enabled)) { \ - gen_exception(ctx, POWERPC_EXCP_FPU); \ - return; \ - } \ - gen_update_nip(ctx, ctx->base.pc_next - 4); \ - rt = gen_fprp_ptr(rD(ctx->opcode)); \ - rs = gen_fprp_ptr(fprfld(ctx->opcode)); \ - i32 = tcg_const_i32(i32fld(ctx->opcode)); \ - gen_helper_##name(cpu_env, rt, rs, i32); \ - if (unlikely(Rc(ctx->opcode) != 0)) { \ - gen_set_cr1_from_fpscr(ctx); \ - } \ - tcg_temp_free_ptr(rt); \ - tcg_temp_free_ptr(rs); \ - tcg_temp_free_i32(i32); \ +#define 
TRANS_DFP_T_A_B_I32_Rc(NAME, I32FLD) \ +static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \ +{ \ + TCGv_ptr rt, ra, rb; \ + REQUIRE_INSNS_FLAGS2(ctx, DFP); \ + REQUIRE_FPU(ctx); \ + rt = gen_fprp_ptr(a->frt); \ + ra = gen_fprp_ptr(a->fra); \ + rb = gen_fprp_ptr(a->frb); \ + gen_helper_##NAME(cpu_env, rt, ra, rb, \ + tcg_constant_i32(a->I32FLD)); \ + if (unlikely(a->rc)) { \ + gen_set_cr1_from_fpscr(ctx); \ + } \ + tcg_temp_free_ptr(rt); \ + tcg_temp_free_ptr(ra); \ + tcg_temp_free_ptr(rb); \ + return true; \ } -GEN_DFP_T_A_B_Rc(dadd) -GEN_DFP_T_A_B_Rc(daddq) -GEN_DFP_T_A_B_Rc(dsub) -GEN_DFP_T_A_B_Rc(dsubq) -GEN_DFP_T_A_B_Rc(dmul) -GEN_DFP_T_A_B_Rc(dmulq) -GEN_DFP_T_A_B_Rc(ddiv) -GEN_DFP_T_A_B_Rc(ddivq) -GEN_DFP_BF_A_B(dcmpu) -GEN_DFP_BF_A_B(dcmpuq) -GEN_DFP_BF_A_B(dcmpo) -GEN_DFP_BF_A_B(dcmpoq) -GEN_DFP_BF_A_DCM(dtstdc) -GEN_DFP_BF_A_DCM(dtstdcq) -GEN_DFP_BF_A_DCM(dtstdg) -GEN_DFP_BF_A_DCM(dtstdgq) -GEN_DFP_BF_A_B(dtstex) -GEN_DFP_BF_A_B(dtstexq) -GEN_DFP_BF_A_B(dtstsf) -GEN_DFP_BF_A_B(dtstsfq) -GEN_DFP_BF_I_B(dtstsfi) -GEN_DFP_BF_I_B(dtstsfiq) -GEN_DFP_T_B_U32_U32_Rc(dquai, SIMM5, RMC) -GEN_DFP_T_B_U32_U32_Rc(dquaiq, SIMM5, RMC) -GEN_DFP_T_A_B_I32_Rc(dqua, RMC) -GEN_DFP_T_A_B_I32_Rc(dquaq, RMC) -GEN_DFP_T_A_B_I32_Rc(drrnd, RMC) -GEN_DFP_T_A_B_I32_Rc(drrndq, RMC) -GEN_DFP_T_B_U32_U32_Rc(drintx, FPW, RMC) -GEN_DFP_T_B_U32_U32_Rc(drintxq, FPW, RMC) -GEN_DFP_T_B_U32_U32_Rc(drintn, FPW, RMC) -GEN_DFP_T_B_U32_U32_Rc(drintnq, FPW, RMC) -GEN_DFP_T_B_Rc(dctdp) -GEN_DFP_T_B_Rc(dctqpq) -GEN_DFP_T_B_Rc(drsp) -GEN_DFP_T_B_Rc(drdpq) -GEN_DFP_T_B_Rc(dcffix) -GEN_DFP_T_B_Rc(dcffixq) -GEN_DFP_T_B_Rc(dctfix) -GEN_DFP_T_B_Rc(dctfixq) -GEN_DFP_T_FPR_I32_Rc(ddedpd, rB, SP) -GEN_DFP_T_FPR_I32_Rc(ddedpdq, rB, SP) -GEN_DFP_T_FPR_I32_Rc(denbcd, rB, SP) -GEN_DFP_T_FPR_I32_Rc(denbcdq, rB, SP) -GEN_DFP_T_B_Rc(dxex) -GEN_DFP_T_B_Rc(dxexq) -GEN_DFP_T_A_B_Rc(diex) -GEN_DFP_T_A_B_Rc(diexq) -GEN_DFP_T_FPR_I32_Rc(dscli, rA, DCM) -GEN_DFP_T_FPR_I32_Rc(dscliq, rA, DCM) -GEN_DFP_T_FPR_I32_Rc(dscri, rA, DCM) -GEN_DFP_T_FPR_I32_Rc(dscriq, rA, DCM) +#define TRANS_DFP_T_B_Rc(NAME) \ +static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \ +{ \ + TCGv_ptr rt, rb; \ + REQUIRE_INSNS_FLAGS2(ctx, DFP); \ + REQUIRE_FPU(ctx); \ + rt = gen_fprp_ptr(a->rt); \ + rb = gen_fprp_ptr(a->rb); \ + gen_helper_##NAME(cpu_env, rt, rb); \ + if (unlikely(a->rc)) { \ + gen_set_cr1_from_fpscr(ctx); \ + } \ + tcg_temp_free_ptr(rt); \ + tcg_temp_free_ptr(rb); \ + return true; \ +} -#undef GEN_DFP_T_A_B_Rc -#undef GEN_DFP_BF_A_B -#undef GEN_DFP_BF_A_DCM -#undef GEN_DFP_T_B_U32_U32_Rc -#undef GEN_DFP_T_A_B_I32_Rc -#undef GEN_DFP_T_B_Rc -#undef GEN_DFP_T_FPR_I32_Rc +#define TRANS_DFP_T_FPR_I32_Rc(NAME, FPRFLD, I32FLD) \ +static bool trans_##NAME(DisasContext *ctx, arg_##NAME *a) \ +{ \ + TCGv_ptr rt, rx; \ + REQUIRE_INSNS_FLAGS2(ctx, DFP); \ + REQUIRE_FPU(ctx); \ + rt = gen_fprp_ptr(a->rt); \ + rx = gen_fprp_ptr(a->FPRFLD); \ + gen_helper_##NAME(cpu_env, rt, rx, \ + tcg_constant_i32(a->I32FLD)); \ + if (unlikely(a->rc)) { \ + gen_set_cr1_from_fpscr(ctx); \ + } \ + tcg_temp_free_ptr(rt); \ + tcg_temp_free_ptr(rx); \ + return true; \ +} + +TRANS_DFP_T_A_B_Rc(DADD) +TRANS_DFP_T_A_B_Rc(DADDQ) +TRANS_DFP_T_A_B_Rc(DSUB) +TRANS_DFP_T_A_B_Rc(DSUBQ) +TRANS_DFP_T_A_B_Rc(DMUL) +TRANS_DFP_T_A_B_Rc(DMULQ) +TRANS_DFP_T_A_B_Rc(DDIV) +TRANS_DFP_T_A_B_Rc(DDIVQ) +TRANS_DFP_BF_A_B(DCMPU) +TRANS_DFP_BF_A_B(DCMPUQ) +TRANS_DFP_BF_A_B(DCMPO) +TRANS_DFP_BF_A_B(DCMPOQ) +TRANS_DFP_BF_A_DCM(DTSTDC) +TRANS_DFP_BF_A_DCM(DTSTDCQ) +TRANS_DFP_BF_A_DCM(DTSTDG) 
+TRANS_DFP_BF_A_DCM(DTSTDGQ) +TRANS_DFP_BF_A_B(DTSTEX) +TRANS_DFP_BF_A_B(DTSTEXQ) +TRANS_DFP_BF_A_B(DTSTSF) +TRANS_DFP_BF_A_B(DTSTSFQ) +TRANS_DFP_BF_I_B(DTSTSFI) +TRANS_DFP_BF_I_B(DTSTSFIQ) +TRANS_DFP_T_B_U32_U32_Rc(DQUAI, te, rmc) +TRANS_DFP_T_B_U32_U32_Rc(DQUAIQ, te, rmc) +TRANS_DFP_T_A_B_I32_Rc(DQUA, rmc) +TRANS_DFP_T_A_B_I32_Rc(DQUAQ, rmc) +TRANS_DFP_T_A_B_I32_Rc(DRRND, rmc) +TRANS_DFP_T_A_B_I32_Rc(DRRNDQ, rmc) +TRANS_DFP_T_B_U32_U32_Rc(DRINTX, r, rmc) +TRANS_DFP_T_B_U32_U32_Rc(DRINTXQ, r, rmc) +TRANS_DFP_T_B_U32_U32_Rc(DRINTN, r, rmc) +TRANS_DFP_T_B_U32_U32_Rc(DRINTNQ, r, rmc) +TRANS_DFP_T_B_Rc(DCTDP) +TRANS_DFP_T_B_Rc(DCTQPQ) +TRANS_DFP_T_B_Rc(DRSP) +TRANS_DFP_T_B_Rc(DRDPQ) +TRANS_DFP_T_B_Rc(DCFFIX) +TRANS_DFP_T_B_Rc(DCFFIXQ) +TRANS_DFP_T_B_Rc(DCTFIX) +TRANS_DFP_T_B_Rc(DCTFIXQ) +TRANS_DFP_T_FPR_I32_Rc(DDEDPD, rb, sp) +TRANS_DFP_T_FPR_I32_Rc(DDEDPDQ, rb, sp) +TRANS_DFP_T_FPR_I32_Rc(DENBCD, rb, s) +TRANS_DFP_T_FPR_I32_Rc(DENBCDQ, rb, s) +TRANS_DFP_T_B_Rc(DXEX) +TRANS_DFP_T_B_Rc(DXEXQ) +TRANS_DFP_T_A_B_Rc(DIEX) +TRANS_DFP_T_A_B_Rc(DIEXQ) +TRANS_DFP_T_FPR_I32_Rc(DSCLI, ra, sh) +TRANS_DFP_T_FPR_I32_Rc(DSCLIQ, ra, sh) +TRANS_DFP_T_FPR_I32_Rc(DSCRI, ra, sh) +TRANS_DFP_T_FPR_I32_Rc(DSCRIQ, ra, sh) + +static bool trans_DCFFIXQQ(DisasContext *ctx, arg_DCFFIXQQ *a) +{ + TCGv_ptr rt, rb; + + REQUIRE_INSNS_FLAGS2(ctx, DFP); + REQUIRE_FPU(ctx); + REQUIRE_VECTOR(ctx); + + rt = gen_fprp_ptr(a->frtp); + rb = gen_avr_ptr(a->vrb); + gen_helper_DCFFIXQQ(cpu_env, rt, rb); + tcg_temp_free_ptr(rt); + tcg_temp_free_ptr(rb); + + return true; +} + +static bool trans_DCTFIXQQ(DisasContext *ctx, arg_DCTFIXQQ *a) +{ + TCGv_ptr rt, rb; + + REQUIRE_INSNS_FLAGS2(ctx, DFP); + REQUIRE_FPU(ctx); + REQUIRE_VECTOR(ctx); + + rt = gen_avr_ptr(a->vrt); + rb = gen_fprp_ptr(a->frbp); + gen_helper_DCTFIXQQ(cpu_env, rt, rb); + tcg_temp_free_ptr(rt); + tcg_temp_free_ptr(rb); + + return true; +} diff --git a/target/ppc/translate/dfp-ops.c.inc b/target/ppc/translate/dfp-ops.c.inc deleted file mode 100644 index 6ef38e5712..0000000000 --- a/target/ppc/translate/dfp-ops.c.inc +++ /dev/null @@ -1,165 +0,0 @@ -#define _GEN_DFP_LONG(name, op1, op2, mask) \ -GEN_HANDLER_E(name, 0x3B, op1, op2, mask, PPC_NONE, PPC2_DFP) - -#define _GEN_DFP_LONG_300(name, op1, op2, mask) \ -GEN_HANDLER_E(name, 0x3B, op1, op2, mask, PPC_NONE, PPC2_ISA300) - -#define _GEN_DFP_LONGx2(name, op1, op2, mask) \ -GEN_HANDLER_E(name, 0x3B, op1, 0x00 | op2, mask, PPC_NONE, PPC2_DFP), \ -GEN_HANDLER_E(name, 0x3B, op1, 0x10 | op2, mask, PPC_NONE, PPC2_DFP) - -#define _GEN_DFP_LONGx4(name, op1, op2, mask) \ -GEN_HANDLER_E(name, 0x3B, op1, 0x00 | op2, mask, PPC_NONE, PPC2_DFP), \ -GEN_HANDLER_E(name, 0x3B, op1, 0x08 | op2, mask, PPC_NONE, PPC2_DFP), \ -GEN_HANDLER_E(name, 0x3B, op1, 0x10 | op2, mask, PPC_NONE, PPC2_DFP), \ -GEN_HANDLER_E(name, 0x3B, op1, 0x18 | op2, mask, PPC_NONE, PPC2_DFP) - -#define _GEN_DFP_QUAD(name, op1, op2, mask) \ -GEN_HANDLER_E(name, 0x3F, op1, op2, mask, PPC_NONE, PPC2_DFP) - -#define _GEN_DFP_QUAD_300(name, op1, op2, mask) \ -GEN_HANDLER_E(name, 0x3F, op1, op2, mask, PPC_NONE, PPC2_ISA300) - -#define _GEN_DFP_QUADx2(name, op1, op2, mask) \ -GEN_HANDLER_E(name, 0x3F, op1, 0x00 | op2, mask, PPC_NONE, PPC2_DFP), \ -GEN_HANDLER_E(name, 0x3F, op1, 0x10 | op2, mask, PPC_NONE, PPC2_DFP) - -#define _GEN_DFP_QUADx4(name, op1, op2, mask) \ -GEN_HANDLER_E(name, 0x3F, op1, 0x00 | op2, mask, PPC_NONE, PPC2_DFP), \ -GEN_HANDLER_E(name, 0x3F, op1, 0x08 | op2, mask, PPC_NONE, PPC2_DFP), \ -GEN_HANDLER_E(name, 0x3F, op1, 0x10 | op2, mask, PPC_NONE, 
PPC2_DFP), \ -GEN_HANDLER_E(name, 0x3F, op1, 0x18 | op2, mask, PPC_NONE, PPC2_DFP) - -#define GEN_DFP_T_A_B_Rc(name, op1, op2) \ -_GEN_DFP_LONG(name, op1, op2, 0x00000000) - -#define GEN_DFP_Tp_Ap_Bp_Rc(name, op1, op2) \ -_GEN_DFP_QUAD(name, op1, op2, 0x00210800) - -#define GEN_DFP_Tp_A_Bp_Rc(name, op1, op2) \ -_GEN_DFP_QUAD(name, op1, op2, 0x00200800) - -#define GEN_DFP_T_B_Rc(name, op1, op2) \ -_GEN_DFP_LONG(name, op1, op2, 0x001F0000) - -#define GEN_DFP_Tp_Bp_Rc(name, op1, op2) \ -_GEN_DFP_QUAD(name, op1, op2, 0x003F0800) - -#define GEN_DFP_Tp_B_Rc(name, op1, op2) \ -_GEN_DFP_QUAD(name, op1, op2, 0x003F0000) - -#define GEN_DFP_T_Bp_Rc(name, op1, op2) \ -_GEN_DFP_QUAD(name, op1, op2, 0x001F0800) - -#define GEN_DFP_BF_A_B(name, op1, op2) \ -_GEN_DFP_LONG(name, op1, op2, 0x00000001) - -#define GEN_DFP_BF_A_B_300(name, op1, op2) \ -_GEN_DFP_LONG_300(name, op1, op2, 0x00400001) - -#define GEN_DFP_BF_Ap_Bp(name, op1, op2) \ -_GEN_DFP_QUAD(name, op1, op2, 0x00610801) - -#define GEN_DFP_BF_A_Bp(name, op1, op2) \ -_GEN_DFP_QUAD(name, op1, op2, 0x00600801) - -#define GEN_DFP_BF_A_Bp_300(name, op1, op2) \ -_GEN_DFP_QUAD_300(name, op1, op2, 0x00400001) - -#define GEN_DFP_BF_A_DCM(name, op1, op2) \ -_GEN_DFP_LONGx2(name, op1, op2, 0x00600001) - -#define GEN_DFP_BF_Ap_DCM(name, op1, op2) \ -_GEN_DFP_QUADx2(name, op1, op2, 0x00610001) - -#define GEN_DFP_T_A_B_RMC_Rc(name, op1, op2) \ -_GEN_DFP_LONGx4(name, op1, op2, 0x00000000) - -#define GEN_DFP_Tp_Ap_Bp_RMC_Rc(name, op1, op2) \ -_GEN_DFP_QUADx4(name, op1, op2, 0x02010800) - -#define GEN_DFP_Tp_A_Bp_RMC_Rc(name, op1, op2) \ -_GEN_DFP_QUADx4(name, op1, op2, 0x02000800) - -#define GEN_DFP_TE_T_B_RMC_Rc(name, op1, op2) \ -_GEN_DFP_LONGx4(name, op1, op2, 0x00000000) - -#define GEN_DFP_TE_Tp_Bp_RMC_Rc(name, op1, op2) \ -_GEN_DFP_QUADx4(name, op1, op2, 0x00200800) - -#define GEN_DFP_R_T_B_RMC_Rc(name, op1, op2) \ -_GEN_DFP_LONGx4(name, op1, op2, 0x001E0000) - -#define GEN_DFP_R_Tp_Bp_RMC_Rc(name, op1, op2) \ -_GEN_DFP_QUADx4(name, op1, op2, 0x003E0800) - -#define GEN_DFP_SP_T_B_Rc(name, op1, op2) \ -_GEN_DFP_LONG(name, op1, op2, 0x00070000) - -#define GEN_DFP_SP_Tp_Bp_Rc(name, op1, op2) \ -_GEN_DFP_QUAD(name, op1, op2, 0x00270800) - -#define GEN_DFP_S_T_B_Rc(name, op1, op2) \ -_GEN_DFP_LONG(name, op1, op2, 0x000F0000) - -#define GEN_DFP_S_Tp_Bp_Rc(name, op1, op2) \ -_GEN_DFP_QUAD(name, op1, op2, 0x002F0800) - -#define GEN_DFP_T_A_SH_Rc(name, op1, op2) \ -_GEN_DFP_LONGx2(name, op1, op2, 0x00000000) - -#define GEN_DFP_Tp_Ap_SH_Rc(name, op1, op2) \ -_GEN_DFP_QUADx2(name, op1, op2, 0x00210000) - -GEN_DFP_T_A_B_Rc(dadd, 0x02, 0x00), -GEN_DFP_Tp_Ap_Bp_Rc(daddq, 0x02, 0x00), -GEN_DFP_T_A_B_Rc(dsub, 0x02, 0x10), -GEN_DFP_Tp_Ap_Bp_Rc(dsubq, 0x02, 0x10), -GEN_DFP_T_A_B_Rc(dmul, 0x02, 0x01), -GEN_DFP_Tp_Ap_Bp_Rc(dmulq, 0x02, 0x01), -GEN_DFP_T_A_B_Rc(ddiv, 0x02, 0x11), -GEN_DFP_Tp_Ap_Bp_Rc(ddivq, 0x02, 0x11), -GEN_DFP_BF_A_B(dcmpu, 0x02, 0x14), -GEN_DFP_BF_Ap_Bp(dcmpuq, 0x02, 0x14), -GEN_DFP_BF_A_B(dcmpo, 0x02, 0x04), -GEN_DFP_BF_Ap_Bp(dcmpoq, 0x02, 0x04), -GEN_DFP_BF_A_DCM(dtstdc, 0x02, 0x06), -GEN_DFP_BF_Ap_DCM(dtstdcq, 0x02, 0x06), -GEN_DFP_BF_A_DCM(dtstdg, 0x02, 0x07), -GEN_DFP_BF_Ap_DCM(dtstdgq, 0x02, 0x07), -GEN_DFP_BF_A_B(dtstex, 0x02, 0x05), -GEN_DFP_BF_Ap_Bp(dtstexq, 0x02, 0x05), -GEN_DFP_BF_A_B(dtstsf, 0x02, 0x15), -GEN_DFP_BF_A_Bp(dtstsfq, 0x02, 0x15), -GEN_DFP_BF_A_B_300(dtstsfi, 0x03, 0x15), -GEN_DFP_BF_A_Bp_300(dtstsfiq, 0x03, 0x15), -GEN_DFP_TE_T_B_RMC_Rc(dquai, 0x03, 0x02), -GEN_DFP_TE_Tp_Bp_RMC_Rc(dquaiq, 0x03, 0x02), -GEN_DFP_T_A_B_RMC_Rc(dqua, 0x03, 
0x00), -GEN_DFP_Tp_Ap_Bp_RMC_Rc(dquaq, 0x03, 0x00), -GEN_DFP_T_A_B_RMC_Rc(drrnd, 0x03, 0x01), -GEN_DFP_Tp_A_Bp_RMC_Rc(drrndq, 0x03, 0x01), -GEN_DFP_R_T_B_RMC_Rc(drintx, 0x03, 0x03), -GEN_DFP_R_Tp_Bp_RMC_Rc(drintxq, 0x03, 0x03), -GEN_DFP_R_T_B_RMC_Rc(drintn, 0x03, 0x07), -GEN_DFP_R_Tp_Bp_RMC_Rc(drintnq, 0x03, 0x07), -GEN_DFP_T_B_Rc(dctdp, 0x02, 0x08), -GEN_DFP_Tp_B_Rc(dctqpq, 0x02, 0x08), -GEN_DFP_T_B_Rc(drsp, 0x02, 0x18), -GEN_DFP_Tp_Bp_Rc(drdpq, 0x02, 0x18), -GEN_DFP_T_B_Rc(dcffix, 0x02, 0x19), -GEN_DFP_Tp_B_Rc(dcffixq, 0x02, 0x19), -GEN_DFP_T_B_Rc(dctfix, 0x02, 0x09), -GEN_DFP_T_Bp_Rc(dctfixq, 0x02, 0x09), -GEN_DFP_SP_T_B_Rc(ddedpd, 0x02, 0x0a), -GEN_DFP_SP_Tp_Bp_Rc(ddedpdq, 0x02, 0x0a), -GEN_DFP_S_T_B_Rc(denbcd, 0x02, 0x1a), -GEN_DFP_S_Tp_Bp_Rc(denbcdq, 0x02, 0x1a), -GEN_DFP_T_B_Rc(dxex, 0x02, 0x0b), -GEN_DFP_T_Bp_Rc(dxexq, 0x02, 0x0b), -GEN_DFP_T_A_B_Rc(diex, 0x02, 0x1b), -GEN_DFP_Tp_A_Bp_Rc(diexq, 0x02, 0x1b), -GEN_DFP_T_A_SH_Rc(dscli, 0x02, 0x02), -GEN_DFP_Tp_Ap_SH_Rc(dscliq, 0x02, 0x02), -GEN_DFP_T_A_SH_Rc(dscri, 0x02, 0x03), -GEN_DFP_Tp_Ap_SH_Rc(dscriq, 0x02, 0x03), diff --git a/target/ppc/translate/fixedpoint-impl.c.inc b/target/ppc/translate/fixedpoint-impl.c.inc index 2e2518ee15..7fecff4579 100644 --- a/target/ppc/translate/fixedpoint-impl.c.inc +++ b/target/ppc/translate/fixedpoint-impl.c.inc @@ -17,25 +17,6 @@ * License along with this library; if not, see . */ -/* - * Incorporate CIA into the constant when R=1. - * Validate that when R=1, RA=0. - */ -static bool resolve_PLS_D(DisasContext *ctx, arg_D *d, arg_PLS_D *a) -{ - d->rt = a->rt; - d->ra = a->ra; - d->si = a->si; - if (a->r) { - if (unlikely(a->ra != 0)) { - gen_invalid(ctx); - return false; - } - d->si += ctx->cia; - } - return true; -} - /* * Fixed-Point Load/Store Instructions */ @@ -51,15 +32,7 @@ static bool do_ldst(DisasContext *ctx, int rt, int ra, TCGv displ, bool update, } gen_set_access_type(ctx, ACCESS_INT); - ea = tcg_temp_new(); - if (ra) { - tcg_gen_add_tl(ea, cpu_gpr[ra], displ); - } else { - tcg_gen_mov_tl(ea, displ); - } - if (NARROW_MODE(ctx)) { - tcg_gen_ext32u_tl(ea, ea); - } + ea = do_ea_calc(ctx, ra, displ); mop ^= ctx->default_tcg_memop_mask; if (store) { tcg_gen_qemu_st_tl(cpu_gpr[rt], ea, ctx->mem_idx, mop); @@ -96,6 +69,107 @@ static bool do_ldst_X(DisasContext *ctx, arg_X *a, bool update, return do_ldst(ctx, a->rt, a->ra, cpu_gpr[a->rb], update, store, mop); } +static bool do_ldst_quad(DisasContext *ctx, arg_D *a, bool store, bool prefixed) +{ +#if defined(TARGET_PPC64) + TCGv ea; + TCGv_i64 low_addr_gpr, high_addr_gpr; + MemOp mop; + + REQUIRE_INSNS_FLAGS(ctx, 64BX); + + if (!prefixed && !(ctx->insns_flags2 & PPC2_LSQ_ISA207)) { + if (ctx->pr) { + /* lq and stq were privileged prior to V. 
2.07 */ + gen_priv_exception(ctx, POWERPC_EXCP_PRIV_OPC); + return true; + } + + if (ctx->le_mode) { + gen_align_no_le(ctx); + return true; + } + } + + if (!store && unlikely(a->ra == a->rt)) { + gen_invalid(ctx); + return true; + } + + gen_set_access_type(ctx, ACCESS_INT); + ea = do_ea_calc(ctx, a->ra, tcg_constant_tl(a->si)); + + if (prefixed || !ctx->le_mode) { + low_addr_gpr = cpu_gpr[a->rt]; + high_addr_gpr = cpu_gpr[a->rt + 1]; + } else { + low_addr_gpr = cpu_gpr[a->rt + 1]; + high_addr_gpr = cpu_gpr[a->rt]; + } + + if (tb_cflags(ctx->base.tb) & CF_PARALLEL) { + if (HAVE_ATOMIC128) { + mop = DEF_MEMOP(MO_128); + TCGv_i32 oi = tcg_constant_i32(make_memop_idx(mop, ctx->mem_idx)); + if (store) { + if (ctx->le_mode) { + gen_helper_stq_le_parallel(cpu_env, ea, low_addr_gpr, + high_addr_gpr, oi); + } else { + gen_helper_stq_be_parallel(cpu_env, ea, high_addr_gpr, + low_addr_gpr, oi); + + } + } else { + if (ctx->le_mode) { + gen_helper_lq_le_parallel(low_addr_gpr, cpu_env, ea, oi); + tcg_gen_ld_i64(high_addr_gpr, cpu_env, + offsetof(CPUPPCState, retxh)); + } else { + gen_helper_lq_be_parallel(high_addr_gpr, cpu_env, ea, oi); + tcg_gen_ld_i64(low_addr_gpr, cpu_env, + offsetof(CPUPPCState, retxh)); + } + } + } else { + /* Restart with exclusive lock. */ + gen_helper_exit_atomic(cpu_env); + ctx->base.is_jmp = DISAS_NORETURN; + } + } else { + mop = DEF_MEMOP(MO_Q); + if (store) { + tcg_gen_qemu_st_i64(low_addr_gpr, ea, ctx->mem_idx, mop); + } else { + tcg_gen_qemu_ld_i64(low_addr_gpr, ea, ctx->mem_idx, mop); + } + + gen_addr_add(ctx, ea, ea, 8); + + if (store) { + tcg_gen_qemu_st_i64(high_addr_gpr, ea, ctx->mem_idx, mop); + } else { + tcg_gen_qemu_ld_i64(high_addr_gpr, ea, ctx->mem_idx, mop); + } + } + tcg_temp_free(ea); +#else + qemu_build_not_reached(); +#endif + + return true; +} + +static bool do_ldst_quad_PLS_D(DisasContext *ctx, arg_PLS_D *a, bool store) +{ + arg_D d; + if (!resolve_PLS_D(ctx, &d, a)) { + return true; + } + + return do_ldst_quad(ctx, &d, store, true); +} + /* Load Byte and Zero */ TRANS(LBZ, do_ldst_D, false, false, MO_UB) TRANS(LBZX, do_ldst_X, false, false, MO_UB) @@ -137,6 +211,10 @@ TRANS64(LDU, do_ldst_D, true, false, MO_Q) TRANS64(LDUX, do_ldst_X, true, false, MO_Q) TRANS64(PLD, do_ldst_PLS_D, false, false, MO_Q) +/* Load Quadword */ +TRANS64(LQ, do_ldst_quad, false, false); +TRANS64(PLQ, do_ldst_quad_PLS_D, false); + /* Store Byte */ TRANS(STB, do_ldst_D, false, true, MO_UB) TRANS(STBX, do_ldst_X, false, true, MO_UB) @@ -165,6 +243,10 @@ TRANS64(STDU, do_ldst_D, true, true, MO_Q) TRANS64(STDUX, do_ldst_X, true, true, MO_Q) TRANS64(PSTD, do_ldst_PLS_D, false, true, MO_Q) +/* Store Quadword */ +TRANS64(STQ, do_ldst_quad, true, false); +TRANS64(PSTQ, do_ldst_quad_PLS_D, true); + /* * Fixed-Point Compare Instructions */ @@ -325,7 +407,86 @@ static bool trans_CFUGED(DisasContext *ctx, arg_X *a) REQUIRE_64BIT(ctx); REQUIRE_INSNS_FLAGS2(ctx, ISA310); #if defined(TARGET_PPC64) - gen_helper_cfuged(cpu_gpr[a->ra], cpu_gpr[a->rt], cpu_gpr[a->rb]); + gen_helper_CFUGED(cpu_gpr[a->ra], cpu_gpr[a->rt], cpu_gpr[a->rb]); +#else + qemu_build_not_reached(); +#endif + return true; +} + +static void do_cntzdm(TCGv_i64 dst, TCGv_i64 src, TCGv_i64 mask, int64_t trail) +{ + TCGv_i64 t0, t1; + + t0 = tcg_temp_new_i64(); + t1 = tcg_temp_new_i64(); + + tcg_gen_and_i64(t0, src, mask); + if (trail) { + tcg_gen_ctzi_i64(t0, t0, -1); + } else { + tcg_gen_clzi_i64(t0, t0, -1); + } + + tcg_gen_setcondi_i64(TCG_COND_NE, t1, t0, -1); + tcg_gen_andi_i64(t0, t0, 63); + tcg_gen_xori_i64(t0, t0, 63); + 
if (trail) { + tcg_gen_shl_i64(t0, mask, t0); + tcg_gen_shl_i64(t0, t0, t1); + } else { + tcg_gen_shr_i64(t0, mask, t0); + tcg_gen_shr_i64(t0, t0, t1); + } + + tcg_gen_ctpop_i64(dst, t0); + + tcg_temp_free_i64(t0); + tcg_temp_free_i64(t1); +} + +static bool trans_CNTLZDM(DisasContext *ctx, arg_X *a) +{ + REQUIRE_64BIT(ctx); + REQUIRE_INSNS_FLAGS2(ctx, ISA310); +#if defined(TARGET_PPC64) + do_cntzdm(cpu_gpr[a->ra], cpu_gpr[a->rt], cpu_gpr[a->rb], false); +#else + qemu_build_not_reached(); +#endif + return true; +} + +static bool trans_CNTTZDM(DisasContext *ctx, arg_X *a) +{ + REQUIRE_64BIT(ctx); + REQUIRE_INSNS_FLAGS2(ctx, ISA310); +#if defined(TARGET_PPC64) + do_cntzdm(cpu_gpr[a->ra], cpu_gpr[a->rt], cpu_gpr[a->rb], true); +#else + qemu_build_not_reached(); +#endif + return true; +} + +static bool trans_PDEPD(DisasContext *ctx, arg_X *a) +{ + REQUIRE_64BIT(ctx); + REQUIRE_INSNS_FLAGS2(ctx, ISA310); +#if defined(TARGET_PPC64) + gen_helper_PDEPD(cpu_gpr[a->ra], cpu_gpr[a->rt], cpu_gpr[a->rb]); +#else + qemu_build_not_reached(); +#endif + return true; +} + +static bool trans_PEXTD(DisasContext *ctx, arg_X *a) +{ + REQUIRE_64BIT(ctx); + REQUIRE_INSNS_FLAGS2(ctx, ISA310); +#if defined(TARGET_PPC64) + gen_helper_PEXTD(cpu_gpr[a->ra], cpu_gpr[a->rt], cpu_gpr[a->rb]); #else qemu_build_not_reached(); #endif diff --git a/target/ppc/translate/fp-impl.c.inc b/target/ppc/translate/fp-impl.c.inc index 9f7868ee28..c9e05201d9 100644 --- a/target/ppc/translate/fp-impl.c.inc +++ b/target/ppc/translate/fp-impl.c.inc @@ -854,99 +854,6 @@ static void gen_mtfsfi(DisasContext *ctx) gen_helper_float_check_status(cpu_env); } -/*** Floating-point load ***/ -#define GEN_LDF(name, ldop, opc, type) \ -static void glue(gen_, name)(DisasContext *ctx) \ -{ \ - TCGv EA; \ - TCGv_i64 t0; \ - if (unlikely(!ctx->fpu_enabled)) { \ - gen_exception(ctx, POWERPC_EXCP_FPU); \ - return; \ - } \ - gen_set_access_type(ctx, ACCESS_FLOAT); \ - EA = tcg_temp_new(); \ - t0 = tcg_temp_new_i64(); \ - gen_addr_imm_index(ctx, EA, 0); \ - gen_qemu_##ldop(ctx, t0, EA); \ - set_fpr(rD(ctx->opcode), t0); \ - tcg_temp_free(EA); \ - tcg_temp_free_i64(t0); \ -} - -#define GEN_LDUF(name, ldop, opc, type) \ -static void glue(gen_, name##u)(DisasContext *ctx) \ -{ \ - TCGv EA; \ - TCGv_i64 t0; \ - if (unlikely(!ctx->fpu_enabled)) { \ - gen_exception(ctx, POWERPC_EXCP_FPU); \ - return; \ - } \ - if (unlikely(rA(ctx->opcode) == 0)) { \ - gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \ - return; \ - } \ - gen_set_access_type(ctx, ACCESS_FLOAT); \ - EA = tcg_temp_new(); \ - t0 = tcg_temp_new_i64(); \ - gen_addr_imm_index(ctx, EA, 0); \ - gen_qemu_##ldop(ctx, t0, EA); \ - set_fpr(rD(ctx->opcode), t0); \ - tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA); \ - tcg_temp_free(EA); \ - tcg_temp_free_i64(t0); \ -} - -#define GEN_LDUXF(name, ldop, opc, type) \ -static void glue(gen_, name##ux)(DisasContext *ctx) \ -{ \ - TCGv EA; \ - TCGv_i64 t0; \ - if (unlikely(!ctx->fpu_enabled)) { \ - gen_exception(ctx, POWERPC_EXCP_FPU); \ - return; \ - } \ - t0 = tcg_temp_new_i64(); \ - if (unlikely(rA(ctx->opcode) == 0)) { \ - gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \ - return; \ - } \ - gen_set_access_type(ctx, ACCESS_FLOAT); \ - EA = tcg_temp_new(); \ - gen_addr_reg_index(ctx, EA); \ - gen_qemu_##ldop(ctx, t0, EA); \ - set_fpr(rD(ctx->opcode), t0); \ - tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA); \ - tcg_temp_free(EA); \ - tcg_temp_free_i64(t0); \ -} - -#define GEN_LDXF(name, ldop, opc2, opc3, type) \ -static void glue(gen_, name##x)(DisasContext *ctx) \ 
-{ \ - TCGv EA; \ - TCGv_i64 t0; \ - if (unlikely(!ctx->fpu_enabled)) { \ - gen_exception(ctx, POWERPC_EXCP_FPU); \ - return; \ - } \ - gen_set_access_type(ctx, ACCESS_FLOAT); \ - EA = tcg_temp_new(); \ - t0 = tcg_temp_new_i64(); \ - gen_addr_reg_index(ctx, EA); \ - gen_qemu_##ldop(ctx, t0, EA); \ - set_fpr(rD(ctx->opcode), t0); \ - tcg_temp_free(EA); \ - tcg_temp_free_i64(t0); \ -} - -#define GEN_LDFS(name, ldop, op, type) \ -GEN_LDF(name, ldop, op | 0x20, type); \ -GEN_LDUF(name, ldop, op | 0x21, type); \ -GEN_LDUXF(name, ldop, op | 0x01, type); \ -GEN_LDXF(name, ldop, 0x17, op | 0x00, type) - static void gen_qemu_ld32fs(DisasContext *ctx, TCGv_i64 dest, TCGv addr) { TCGv_i32 tmp = tcg_temp_new_i32(); @@ -955,11 +862,6 @@ static void gen_qemu_ld32fs(DisasContext *ctx, TCGv_i64 dest, TCGv addr) tcg_temp_free_i32(tmp); } - /* lfd lfdu lfdux lfdx */ -GEN_LDFS(lfd, ld64_i64, 0x12, PPC_FLOAT); - /* lfs lfsu lfsux lfsx */ -GEN_LDFS(lfs, ld32fs, 0x10, PPC_FLOAT); - /* lfdepx (external PID lfdx) */ static void gen_lfdepx(DisasContext *ctx) { @@ -1089,73 +991,6 @@ static void gen_lfiwzx(DisasContext *ctx) tcg_temp_free(EA); tcg_temp_free_i64(t0); } -/*** Floating-point store ***/ -#define GEN_STF(name, stop, opc, type) \ -static void glue(gen_, name)(DisasContext *ctx) \ -{ \ - TCGv EA; \ - TCGv_i64 t0; \ - if (unlikely(!ctx->fpu_enabled)) { \ - gen_exception(ctx, POWERPC_EXCP_FPU); \ - return; \ - } \ - gen_set_access_type(ctx, ACCESS_FLOAT); \ - EA = tcg_temp_new(); \ - t0 = tcg_temp_new_i64(); \ - gen_addr_imm_index(ctx, EA, 0); \ - get_fpr(t0, rS(ctx->opcode)); \ - gen_qemu_##stop(ctx, t0, EA); \ - tcg_temp_free(EA); \ - tcg_temp_free_i64(t0); \ -} - -#define GEN_STUF(name, stop, opc, type) \ -static void glue(gen_, name##u)(DisasContext *ctx) \ -{ \ - TCGv EA; \ - TCGv_i64 t0; \ - if (unlikely(!ctx->fpu_enabled)) { \ - gen_exception(ctx, POWERPC_EXCP_FPU); \ - return; \ - } \ - if (unlikely(rA(ctx->opcode) == 0)) { \ - gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \ - return; \ - } \ - gen_set_access_type(ctx, ACCESS_FLOAT); \ - EA = tcg_temp_new(); \ - t0 = tcg_temp_new_i64(); \ - gen_addr_imm_index(ctx, EA, 0); \ - get_fpr(t0, rS(ctx->opcode)); \ - gen_qemu_##stop(ctx, t0, EA); \ - tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA); \ - tcg_temp_free(EA); \ - tcg_temp_free_i64(t0); \ -} - -#define GEN_STUXF(name, stop, opc, type) \ -static void glue(gen_, name##ux)(DisasContext *ctx) \ -{ \ - TCGv EA; \ - TCGv_i64 t0; \ - if (unlikely(!ctx->fpu_enabled)) { \ - gen_exception(ctx, POWERPC_EXCP_FPU); \ - return; \ - } \ - if (unlikely(rA(ctx->opcode) == 0)) { \ - gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); \ - return; \ - } \ - gen_set_access_type(ctx, ACCESS_FLOAT); \ - EA = tcg_temp_new(); \ - t0 = tcg_temp_new_i64(); \ - gen_addr_reg_index(ctx, EA); \ - get_fpr(t0, rS(ctx->opcode)); \ - gen_qemu_##stop(ctx, t0, EA); \ - tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA); \ - tcg_temp_free(EA); \ - tcg_temp_free_i64(t0); \ -} #define GEN_STXF(name, stop, opc2, opc3, type) \ static void glue(gen_, name##x)(DisasContext *ctx) \ @@ -1176,12 +1011,6 @@ static void glue(gen_, name##x)(DisasContext *ctx) \ tcg_temp_free_i64(t0); \ } -#define GEN_STFS(name, stop, op, type) \ -GEN_STF(name, stop, op | 0x20, type); \ -GEN_STUF(name, stop, op | 0x21, type); \ -GEN_STUXF(name, stop, op | 0x01, type); \ -GEN_STXF(name, stop, 0x17, op | 0x00, type) - static void gen_qemu_st32fs(DisasContext *ctx, TCGv_i64 src, TCGv addr) { TCGv_i32 tmp = tcg_temp_new_i32(); @@ -1190,11 +1019,6 @@ static void 
gen_qemu_st32fs(DisasContext *ctx, TCGv_i64 src, TCGv addr) tcg_temp_free_i32(tmp); } -/* stfd stfdu stfdux stfdx */ -GEN_STFS(stfd, st64_i64, 0x16, PPC_FLOAT); -/* stfs stfsu stfsux stfsx */ -GEN_STFS(stfs, st32fs, 0x14, PPC_FLOAT); - /* stfdepx (external PID lfdx) */ static void gen_stfdepx(DisasContext *ctx) { @@ -1473,6 +1297,91 @@ static void gen_stfqx(DisasContext *ctx) tcg_temp_free_i64(t1); } +/* Floating-point Load/Store Instructions */ +static bool do_lsfpsd(DisasContext *ctx, int rt, int ra, TCGv displ, + bool update, bool store, bool single) +{ + TCGv ea; + TCGv_i64 t0; + REQUIRE_INSNS_FLAGS(ctx, FLOAT); + REQUIRE_FPU(ctx); + if (update && ra == 0) { + gen_invalid(ctx); + return true; + } + gen_set_access_type(ctx, ACCESS_FLOAT); + t0 = tcg_temp_new_i64(); + ea = do_ea_calc(ctx, ra, displ); + if (store) { + get_fpr(t0, rt); + if (single) { + gen_qemu_st32fs(ctx, t0, ea); + } else { + gen_qemu_st64_i64(ctx, t0, ea); + } + } else { + if (single) { + gen_qemu_ld32fs(ctx, t0, ea); + } else { + gen_qemu_ld64_i64(ctx, t0, ea); + } + set_fpr(rt, t0); + } + if (update) { + tcg_gen_mov_tl(cpu_gpr[ra], ea); + } + tcg_temp_free_i64(t0); + tcg_temp_free(ea); + return true; +} + +static bool do_lsfp_D(DisasContext *ctx, arg_D *a, bool update, bool store, + bool single) +{ + return do_lsfpsd(ctx, a->rt, a->ra, tcg_constant_tl(a->si), update, store, + single); +} + +static bool do_lsfp_PLS_D(DisasContext *ctx, arg_PLS_D *a, bool update, + bool store, bool single) +{ + arg_D d; + if (!resolve_PLS_D(ctx, &d, a)) { + return true; + } + return do_lsfp_D(ctx, &d, update, store, single); +} + +static bool do_lsfp_X(DisasContext *ctx, arg_X *a, bool update, + bool store, bool single) +{ + return do_lsfpsd(ctx, a->rt, a->ra, cpu_gpr[a->rb], update, store, single); +} + +TRANS(LFS, do_lsfp_D, false, false, true) +TRANS(LFSU, do_lsfp_D, true, false, true) +TRANS(LFSX, do_lsfp_X, false, false, true) +TRANS(LFSUX, do_lsfp_X, true, false, true) +TRANS(PLFS, do_lsfp_PLS_D, false, false, true) + +TRANS(LFD, do_lsfp_D, false, false, false) +TRANS(LFDU, do_lsfp_D, true, false, false) +TRANS(LFDX, do_lsfp_X, false, false, false) +TRANS(LFDUX, do_lsfp_X, true, false, false) +TRANS(PLFD, do_lsfp_PLS_D, false, false, false) + +TRANS(STFS, do_lsfp_D, false, true, true) +TRANS(STFSU, do_lsfp_D, true, true, true) +TRANS(STFSX, do_lsfp_X, false, true, true) +TRANS(STFSUX, do_lsfp_X, true, true, true) +TRANS(PSTFS, do_lsfp_PLS_D, false, true, true) + +TRANS(STFD, do_lsfp_D, false, true, false) +TRANS(STFDU, do_lsfp_D, true, true, false) +TRANS(STFDX, do_lsfp_X, false, true, false) +TRANS(STFDUX, do_lsfp_X, true, true, false) +TRANS(PSTFD, do_lsfp_PLS_D, false, true, false) + #undef _GEN_FLOAT_ACB #undef GEN_FLOAT_ACB #undef _GEN_FLOAT_AB diff --git a/target/ppc/translate/fp-ops.c.inc b/target/ppc/translate/fp-ops.c.inc index 88fab65628..4260635a12 100644 --- a/target/ppc/translate/fp-ops.c.inc +++ b/target/ppc/translate/fp-ops.c.inc @@ -50,43 +50,14 @@ GEN_FLOAT_B(riz, 0x08, 0x0D, 1, PPC_FLOAT_EXT), GEN_FLOAT_B(rip, 0x08, 0x0E, 1, PPC_FLOAT_EXT), GEN_FLOAT_B(rim, 0x08, 0x0F, 1, PPC_FLOAT_EXT), -#define GEN_LDF(name, ldop, opc, type) \ -GEN_HANDLER(name, opc, 0xFF, 0xFF, 0x00000000, type), -#define GEN_LDUF(name, ldop, opc, type) \ -GEN_HANDLER(name##u, opc, 0xFF, 0xFF, 0x00000000, type), -#define GEN_LDUXF(name, ldop, opc, type) \ -GEN_HANDLER(name##ux, 0x1F, 0x17, opc, 0x00000001, type), -#define GEN_LDXF(name, ldop, opc2, opc3, type) \ -GEN_HANDLER(name##x, 0x1F, opc2, opc3, 0x00000001, type), -#define 
GEN_LDFS(name, ldop, op, type) \ -GEN_LDF(name, ldop, op | 0x20, type) \ -GEN_LDUF(name, ldop, op | 0x21, type) \ -GEN_LDUXF(name, ldop, op | 0x01, type) \ -GEN_LDXF(name, ldop, 0x17, op | 0x00, type) - -GEN_LDFS(lfd, ld64, 0x12, PPC_FLOAT) -GEN_LDFS(lfs, ld32fs, 0x10, PPC_FLOAT) GEN_HANDLER_E(lfdepx, 0x1F, 0x1F, 0x12, 0x00000001, PPC_NONE, PPC2_BOOKE206), GEN_HANDLER_E(lfiwax, 0x1f, 0x17, 0x1a, 0x00000001, PPC_NONE, PPC2_ISA205), GEN_HANDLER_E(lfiwzx, 0x1f, 0x17, 0x1b, 0x1, PPC_NONE, PPC2_FP_CVT_ISA206), GEN_HANDLER_E(lfdpx, 0x1F, 0x17, 0x18, 0x00200001, PPC_NONE, PPC2_ISA205), -#define GEN_STF(name, stop, opc, type) \ -GEN_HANDLER(name, opc, 0xFF, 0xFF, 0x00000000, type), -#define GEN_STUF(name, stop, opc, type) \ -GEN_HANDLER(name##u, opc, 0xFF, 0xFF, 0x00000000, type), -#define GEN_STUXF(name, stop, opc, type) \ -GEN_HANDLER(name##ux, 0x1F, 0x17, opc, 0x00000001, type), #define GEN_STXF(name, stop, opc2, opc3, type) \ GEN_HANDLER(name##x, 0x1F, opc2, opc3, 0x00000001, type), -#define GEN_STFS(name, stop, op, type) \ -GEN_STF(name, stop, op | 0x20, type) \ -GEN_STUF(name, stop, op | 0x21, type) \ -GEN_STUXF(name, stop, op | 0x01, type) \ -GEN_STXF(name, stop, 0x17, op | 0x00, type) -GEN_STFS(stfd, st64_i64, 0x16, PPC_FLOAT) -GEN_STFS(stfs, st32fs, 0x14, PPC_FLOAT) GEN_STXF(stfiw, st32fiw, 0x17, 0x1E, PPC_FLOAT_STFIWX) GEN_HANDLER_E(stfdepx, 0x1F, 0x1F, 0x16, 0x00000001, PPC_NONE, PPC2_BOOKE206), GEN_HANDLER_E(stfdpx, 0x1F, 0x17, 0x1C, 0x00200001, PPC_NONE, PPC2_ISA205), diff --git a/target/ppc/translate/vector-impl.c.inc b/target/ppc/translate/vector-impl.c.inc deleted file mode 100644 index 117ce9b137..0000000000 --- a/target/ppc/translate/vector-impl.c.inc +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Power ISA decode for Vector Facility instructions - * - * Copyright (c) 2021 Instituto de Pesquisas Eldorado (eldorado.org.br) - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, see . 
- */ - -#define REQUIRE_ALTIVEC(CTX) \ - do { \ - if (unlikely(!(CTX)->altivec_enabled)) { \ - gen_exception((CTX), POWERPC_EXCP_VPU); \ - return true; \ - } \ - } while (0) - -static bool trans_VCFUGED(DisasContext *ctx, arg_VX *a) -{ - TCGv_i64 tgt, src, mask; - - REQUIRE_INSNS_FLAGS2(ctx, ISA310); - REQUIRE_ALTIVEC(ctx); - - tgt = tcg_temp_new_i64(); - src = tcg_temp_new_i64(); - mask = tcg_temp_new_i64(); - - /* centrifuge lower double word */ - get_cpu_vsrl(src, a->vra + 32); - get_cpu_vsrl(mask, a->vrb + 32); - gen_helper_cfuged(tgt, src, mask); - set_cpu_vsrl(a->vrt + 32, tgt); - - /* centrifuge higher double word */ - get_cpu_vsrh(src, a->vra + 32); - get_cpu_vsrh(mask, a->vrb + 32); - gen_helper_cfuged(tgt, src, mask); - set_cpu_vsrh(a->vrt + 32, tgt); - - tcg_temp_free_i64(tgt); - tcg_temp_free_i64(src); - tcg_temp_free_i64(mask); - - return true; -} diff --git a/target/ppc/translate/vmx-impl.c.inc b/target/ppc/translate/vmx-impl.c.inc index 92b9527aff..8eb8d3a067 100644 --- a/target/ppc/translate/vmx-impl.c.inc +++ b/target/ppc/translate/vmx-impl.c.inc @@ -1217,10 +1217,6 @@ GEN_VXFORM_UIMM_SPLAT(vextractub, 6, 8, 15); GEN_VXFORM_UIMM_SPLAT(vextractuh, 6, 9, 14); GEN_VXFORM_UIMM_SPLAT(vextractuw, 6, 10, 12); GEN_VXFORM_UIMM_SPLAT(vextractd, 6, 11, 8); -GEN_VXFORM_UIMM_SPLAT(vinsertb, 6, 12, 15); -GEN_VXFORM_UIMM_SPLAT(vinserth, 6, 13, 14); -GEN_VXFORM_UIMM_SPLAT(vinsertw, 6, 14, 12); -GEN_VXFORM_UIMM_SPLAT(vinsertd, 6, 15, 8); GEN_VXFORM_UIMM_ENV(vcfux, 5, 12); GEN_VXFORM_UIMM_ENV(vcfsx, 5, 13); GEN_VXFORM_UIMM_ENV(vctuxs, 5, 14); @@ -1231,12 +1227,184 @@ GEN_VXFORM_DUAL(vsplth, PPC_ALTIVEC, PPC_NONE, vextractuh, PPC_NONE, PPC2_ISA300); GEN_VXFORM_DUAL(vspltw, PPC_ALTIVEC, PPC_NONE, vextractuw, PPC_NONE, PPC2_ISA300); -GEN_VXFORM_DUAL(vspltisb, PPC_ALTIVEC, PPC_NONE, - vinsertb, PPC_NONE, PPC2_ISA300); -GEN_VXFORM_DUAL(vspltish, PPC_ALTIVEC, PPC_NONE, - vinserth, PPC_NONE, PPC2_ISA300); -GEN_VXFORM_DUAL(vspltisw, PPC_ALTIVEC, PPC_NONE, - vinsertw, PPC_NONE, PPC2_ISA300); + +static bool do_vextdx(DisasContext *ctx, arg_VA *a, int size, bool right, + void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv)) +{ + TCGv_ptr vrt, vra, vrb; + TCGv rc; + + REQUIRE_INSNS_FLAGS2(ctx, ISA310); + REQUIRE_VECTOR(ctx); + + vrt = gen_avr_ptr(a->vrt); + vra = gen_avr_ptr(a->vra); + vrb = gen_avr_ptr(a->vrb); + rc = tcg_temp_new(); + + tcg_gen_andi_tl(rc, cpu_gpr[a->rc], 0x1F); + if (right) { + tcg_gen_subfi_tl(rc, 32 - size, rc); + } + gen_helper(cpu_env, vrt, vra, vrb, rc); + + tcg_temp_free_ptr(vrt); + tcg_temp_free_ptr(vra); + tcg_temp_free_ptr(vrb); + tcg_temp_free(rc); + return true; +} + +TRANS(VEXTDUBVLX, do_vextdx, 1, false, gen_helper_VEXTDUBVLX) +TRANS(VEXTDUHVLX, do_vextdx, 2, false, gen_helper_VEXTDUHVLX) +TRANS(VEXTDUWVLX, do_vextdx, 4, false, gen_helper_VEXTDUWVLX) +TRANS(VEXTDDVLX, do_vextdx, 8, false, gen_helper_VEXTDDVLX) + +TRANS(VEXTDUBVRX, do_vextdx, 1, true, gen_helper_VEXTDUBVLX) +TRANS(VEXTDUHVRX, do_vextdx, 2, true, gen_helper_VEXTDUHVLX) +TRANS(VEXTDUWVRX, do_vextdx, 4, true, gen_helper_VEXTDUWVLX) +TRANS(VEXTDDVRX, do_vextdx, 8, true, gen_helper_VEXTDDVLX) + +static bool do_vinsx(DisasContext *ctx, int vrt, int size, bool right, TCGv ra, + TCGv_i64 rb, void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv)) +{ + TCGv_ptr t; + TCGv idx; + + t = gen_avr_ptr(vrt); + idx = tcg_temp_new(); + + tcg_gen_andi_tl(idx, ra, 0xF); + if (right) { + tcg_gen_subfi_tl(idx, 16 - size, idx); + } + + gen_helper(cpu_env, t, rb, idx); + + tcg_temp_free_ptr(t); + tcg_temp_free(idx); 
+ + return true; +} + +static bool do_vinsvx(DisasContext *ctx, int vrt, int size, bool right, TCGv ra, + int vrb, void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv)) +{ + bool ok; + TCGv_i64 val; + + val = tcg_temp_new_i64(); + get_avr64(val, vrb, true); + ok = do_vinsx(ctx, vrt, size, right, ra, val, gen_helper); + + tcg_temp_free_i64(val); + return ok; +} + +static bool do_vinsx_VX(DisasContext *ctx, arg_VX *a, int size, bool right, + void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv)) +{ + bool ok; + TCGv_i64 val; + + REQUIRE_INSNS_FLAGS2(ctx, ISA310); + REQUIRE_VECTOR(ctx); + + val = tcg_temp_new_i64(); + tcg_gen_extu_tl_i64(val, cpu_gpr[a->vrb]); + + ok = do_vinsx(ctx, a->vrt, size, right, cpu_gpr[a->vra], val, gen_helper); + + tcg_temp_free_i64(val); + return ok; +} + +static bool do_vinsvx_VX(DisasContext *ctx, arg_VX *a, int size, bool right, + void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv)) +{ + REQUIRE_INSNS_FLAGS2(ctx, ISA310); + REQUIRE_VECTOR(ctx); + + return do_vinsvx(ctx, a->vrt, size, right, cpu_gpr[a->vra], a->vrb, + gen_helper); +} + +static bool do_vins_VX_uim4(DisasContext *ctx, arg_VX_uim4 *a, int size, + void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv)) +{ + bool ok; + TCGv_i64 val; + + REQUIRE_INSNS_FLAGS2(ctx, ISA310); + REQUIRE_VECTOR(ctx); + + if (a->uim > (16 - size)) { + /* + * PowerISA v3.1 says that the resulting value is undefined in this + * case, so just log a guest error and leave VRT unchanged. The + * real hardware would do a partial insert, e.g. if VRT is zeroed and + * RB is 0x12345678, executing "vinsw VRT,RB,14" results in + * VRT = 0x0000...00001234, but we don't bother to reproduce this + * behavior as software shouldn't rely on it. + */ + qemu_log_mask(LOG_GUEST_ERROR, "Invalid index for VINS* at" + " 0x" TARGET_FMT_lx ", UIM = %d > %d\n", ctx->cia, a->uim, + 16 - size); + return true; + } + + val = tcg_temp_new_i64(); + tcg_gen_extu_tl_i64(val, cpu_gpr[a->vrb]); + + ok = do_vinsx(ctx, a->vrt, size, false, tcg_constant_tl(a->uim), val, + gen_helper); + + tcg_temp_free_i64(val); + return ok; +} + +static bool do_vinsert_VX_uim4(DisasContext *ctx, arg_VX_uim4 *a, int size, + void (*gen_helper)(TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv)) +{ + REQUIRE_INSNS_FLAGS2(ctx, ISA300); + REQUIRE_VECTOR(ctx); + + if (a->uim > (16 - size)) { + qemu_log_mask(LOG_GUEST_ERROR, "Invalid index for VINSERT* at" + " 0x" TARGET_FMT_lx ", UIM = %d > %d\n", ctx->cia, a->uim, + 16 - size); + return true; + } + + return do_vinsvx(ctx, a->vrt, size, false, tcg_constant_tl(a->uim), a->vrb, + gen_helper); +} + +TRANS(VINSBLX, do_vinsx_VX, 1, false, gen_helper_VINSBLX) +TRANS(VINSHLX, do_vinsx_VX, 2, false, gen_helper_VINSHLX) +TRANS(VINSWLX, do_vinsx_VX, 4, false, gen_helper_VINSWLX) +TRANS(VINSDLX, do_vinsx_VX, 8, false, gen_helper_VINSDLX) + +TRANS(VINSBRX, do_vinsx_VX, 1, true, gen_helper_VINSBLX) +TRANS(VINSHRX, do_vinsx_VX, 2, true, gen_helper_VINSHLX) +TRANS(VINSWRX, do_vinsx_VX, 4, true, gen_helper_VINSWLX) +TRANS(VINSDRX, do_vinsx_VX, 8, true, gen_helper_VINSDLX) + +TRANS(VINSW, do_vins_VX_uim4, 4, gen_helper_VINSWLX) +TRANS(VINSD, do_vins_VX_uim4, 8, gen_helper_VINSDLX) + +TRANS(VINSBVLX, do_vinsvx_VX, 1, false, gen_helper_VINSBLX) +TRANS(VINSHVLX, do_vinsvx_VX, 2, false, gen_helper_VINSHLX) +TRANS(VINSWVLX, do_vinsvx_VX, 4, false, gen_helper_VINSWLX) + +TRANS(VINSBVRX, do_vinsvx_VX, 1, true, gen_helper_VINSBLX) +TRANS(VINSHVRX, do_vinsvx_VX, 2, true, gen_helper_VINSHLX) +TRANS(VINSWVRX, do_vinsvx_VX, 4, true, gen_helper_VINSWLX) + +TRANS(VINSERTB, 
do_vinsert_VX_uim4, 1, gen_helper_VINSBLX) +TRANS(VINSERTH, do_vinsert_VX_uim4, 2, gen_helper_VINSHLX) +TRANS(VINSERTW, do_vinsert_VX_uim4, 4, gen_helper_VINSWLX) +TRANS(VINSERTD, do_vinsert_VX_uim4, 8, gen_helper_VINSDLX) static void gen_vsldoi(DisasContext *ctx) { @@ -1257,6 +1425,72 @@ static void gen_vsldoi(DisasContext *ctx) tcg_temp_free_i32(sh); } +static bool trans_VSLDBI(DisasContext *ctx, arg_VN *a) +{ + TCGv_i64 t0, t1, t2; + + REQUIRE_INSNS_FLAGS2(ctx, ISA310); + REQUIRE_VECTOR(ctx); + + t0 = tcg_temp_new_i64(); + t1 = tcg_temp_new_i64(); + + get_avr64(t0, a->vra, true); + get_avr64(t1, a->vra, false); + + if (a->sh != 0) { + t2 = tcg_temp_new_i64(); + + get_avr64(t2, a->vrb, true); + + tcg_gen_extract2_i64(t0, t1, t0, 64 - a->sh); + tcg_gen_extract2_i64(t1, t2, t1, 64 - a->sh); + + tcg_temp_free_i64(t2); + } + + set_avr64(a->vrt, t0, true); + set_avr64(a->vrt, t1, false); + + tcg_temp_free_i64(t0); + tcg_temp_free_i64(t1); + + return true; +} + +static bool trans_VSRDBI(DisasContext *ctx, arg_VN *a) +{ + TCGv_i64 t2, t1, t0; + + REQUIRE_INSNS_FLAGS2(ctx, ISA310); + REQUIRE_VECTOR(ctx); + + t0 = tcg_temp_new_i64(); + t1 = tcg_temp_new_i64(); + + get_avr64(t0, a->vrb, false); + get_avr64(t1, a->vrb, true); + + if (a->sh != 0) { + t2 = tcg_temp_new_i64(); + + get_avr64(t2, a->vra, false); + + tcg_gen_extract2_i64(t0, t0, t1, a->sh); + tcg_gen_extract2_i64(t1, t1, t2, a->sh); + + tcg_temp_free_i64(t2); + } + + set_avr64(a->vrt, t0, false); + set_avr64(a->vrt, t1, true); + + tcg_temp_free_i64(t0); + tcg_temp_free_i64(t1); + + return true; +} + #define GEN_VAFORM_PAIRED(name0, name1, opc2) \ static void glue(gen_, name0##_##name1)(DisasContext *ctx) \ { \ @@ -1559,6 +1793,86 @@ GEN_VXFORM3(vpermxor, 22, 0xFF) GEN_VXFORM_DUAL(vsldoi, PPC_ALTIVEC, PPC_NONE, vpermxor, PPC_NONE, PPC2_ALTIVEC_207) +static bool trans_VCFUGED(DisasContext *ctx, arg_VX *a) +{ + static const GVecGen3 g = { + .fni8 = gen_helper_CFUGED, + .vece = MO_64, + }; + + REQUIRE_INSNS_FLAGS2(ctx, ISA310); + REQUIRE_VECTOR(ctx); + + tcg_gen_gvec_3(avr_full_offset(a->vrt), avr_full_offset(a->vra), + avr_full_offset(a->vrb), 16, 16, &g); + + return true; +} + +static bool trans_VCLZDM(DisasContext *ctx, arg_VX *a) +{ + static const GVecGen3i g = { + .fni8 = do_cntzdm, + .vece = MO_64, + }; + + REQUIRE_INSNS_FLAGS2(ctx, ISA310); + REQUIRE_VECTOR(ctx); + + tcg_gen_gvec_3i(avr_full_offset(a->vrt), avr_full_offset(a->vra), + avr_full_offset(a->vrb), 16, 16, false, &g); + + return true; +} + +static bool trans_VCTZDM(DisasContext *ctx, arg_VX *a) +{ + static const GVecGen3i g = { + .fni8 = do_cntzdm, + .vece = MO_64, + }; + + REQUIRE_INSNS_FLAGS2(ctx, ISA310); + REQUIRE_VECTOR(ctx); + + tcg_gen_gvec_3i(avr_full_offset(a->vrt), avr_full_offset(a->vra), + avr_full_offset(a->vrb), 16, 16, true, &g); + + return true; +} + +static bool trans_VPDEPD(DisasContext *ctx, arg_VX *a) +{ + static const GVecGen3 g = { + .fni8 = gen_helper_PDEPD, + .vece = MO_64, + }; + + REQUIRE_INSNS_FLAGS2(ctx, ISA310); + REQUIRE_VECTOR(ctx); + + tcg_gen_gvec_3(avr_full_offset(a->vrt), avr_full_offset(a->vra), + avr_full_offset(a->vrb), 16, 16, &g); + + return true; +} + +static bool trans_VPEXTD(DisasContext *ctx, arg_VX *a) +{ + static const GVecGen3 g = { + .fni8 = gen_helper_PEXTD, + .vece = MO_64, + }; + + REQUIRE_INSNS_FLAGS2(ctx, ISA310); + REQUIRE_VECTOR(ctx); + + tcg_gen_gvec_3(avr_full_offset(a->vrt), avr_full_offset(a->vra), + avr_full_offset(a->vrb), 16, 16, &g); + + return true; +} + #undef GEN_VR_LDX #undef GEN_VR_STX #undef GEN_VR_LVE diff 
--git a/target/ppc/translate/vmx-ops.c.inc b/target/ppc/translate/vmx-ops.c.inc index f3f4855111..25ee715b43 100644 --- a/target/ppc/translate/vmx-ops.c.inc +++ b/target/ppc/translate/vmx-ops.c.inc @@ -225,13 +225,9 @@ GEN_VXFORM_DUAL_INV(vsplth, vextractuh, 6, 9, 0x00000000, 0x100000, GEN_VXFORM_DUAL_INV(vspltw, vextractuw, 6, 10, 0x00000000, 0x100000, PPC_ALTIVEC), GEN_VXFORM_300_EXT(vextractd, 6, 11, 0x100000), -GEN_VXFORM_DUAL_INV(vspltisb, vinsertb, 6, 12, 0x00000000, 0x100000, - PPC_ALTIVEC), -GEN_VXFORM_DUAL_INV(vspltish, vinserth, 6, 13, 0x00000000, 0x100000, - PPC_ALTIVEC), -GEN_VXFORM_DUAL_INV(vspltisw, vinsertw, 6, 14, 0x00000000, 0x100000, - PPC_ALTIVEC), -GEN_VXFORM_300_EXT(vinsertd, 6, 15, 0x100000), +GEN_VXFORM(vspltisb, 6, 12), +GEN_VXFORM(vspltish, 6, 13), +GEN_VXFORM(vspltisw, 6, 14), GEN_VXFORM_300_EO(vnegw, 0x01, 0x18, 0x06), GEN_VXFORM_300_EO(vnegd, 0x01, 0x18, 0x07), GEN_VXFORM_300_EO(vextsb2w, 0x01, 0x18, 0x10), diff --git a/target/ppc/translate/vsx-impl.c.inc b/target/ppc/translate/vsx-impl.c.inc index 57a7f73bba..c0e38060b4 100644 --- a/target/ppc/translate/vsx-impl.c.inc +++ b/target/ppc/translate/vsx-impl.c.inc @@ -1,23 +1,13 @@ /*** VSX extension ***/ -static inline void get_cpu_vsrh(TCGv_i64 dst, int n) +static inline void get_cpu_vsr(TCGv_i64 dst, int n, bool high) { - tcg_gen_ld_i64(dst, cpu_env, vsr64_offset(n, true)); + tcg_gen_ld_i64(dst, cpu_env, vsr64_offset(n, high)); } -static inline void get_cpu_vsrl(TCGv_i64 dst, int n) +static inline void set_cpu_vsr(int n, TCGv_i64 src, bool high) { - tcg_gen_ld_i64(dst, cpu_env, vsr64_offset(n, false)); -} - -static inline void set_cpu_vsrh(int n, TCGv_i64 src) -{ - tcg_gen_st_i64(src, cpu_env, vsr64_offset(n, true)); -} - -static inline void set_cpu_vsrl(int n, TCGv_i64 src) -{ - tcg_gen_st_i64(src, cpu_env, vsr64_offset(n, false)); + tcg_gen_st_i64(src, cpu_env, vsr64_offset(n, high)); } static inline TCGv_ptr gen_vsr_ptr(int reg) @@ -41,7 +31,7 @@ static void gen_##name(DisasContext *ctx) \ EA = tcg_temp_new(); \ gen_addr_reg_index(ctx, EA); \ gen_qemu_##operation(ctx, t0, EA); \ - set_cpu_vsrh(xT(ctx->opcode), t0); \ + set_cpu_vsr(xT(ctx->opcode), t0, true); \ /* NOTE: cpu_vsrl is undefined */ \ tcg_temp_free(EA); \ tcg_temp_free_i64(t0); \ @@ -67,10 +57,10 @@ static void gen_lxvd2x(DisasContext *ctx) EA = tcg_temp_new(); gen_addr_reg_index(ctx, EA); gen_qemu_ld64_i64(ctx, t0, EA); - set_cpu_vsrh(xT(ctx->opcode), t0); + set_cpu_vsr(xT(ctx->opcode), t0, true); tcg_gen_addi_tl(EA, EA, 8); gen_qemu_ld64_i64(ctx, t0, EA); - set_cpu_vsrl(xT(ctx->opcode), t0); + set_cpu_vsr(xT(ctx->opcode), t0, false); tcg_temp_free(EA); tcg_temp_free_i64(t0); } @@ -109,8 +99,8 @@ static void gen_lxvw4x(DisasContext *ctx) tcg_gen_addi_tl(EA, EA, 8); tcg_gen_qemu_ld_i64(xtl, EA, ctx->mem_idx, MO_BEQ); } - set_cpu_vsrh(xT(ctx->opcode), xth); - set_cpu_vsrl(xT(ctx->opcode), xtl); + set_cpu_vsr(xT(ctx->opcode), xth, true); + set_cpu_vsr(xT(ctx->opcode), xtl, false); tcg_temp_free(EA); tcg_temp_free_i64(xth); tcg_temp_free_i64(xtl); @@ -233,8 +223,8 @@ static void gen_lxvh8x(DisasContext *ctx) if (ctx->le_mode) { gen_bswap16x8(xth, xtl, xth, xtl); } - set_cpu_vsrh(xT(ctx->opcode), xth); - set_cpu_vsrl(xT(ctx->opcode), xtl); + set_cpu_vsr(xT(ctx->opcode), xth, true); + set_cpu_vsr(xT(ctx->opcode), xtl, false); tcg_temp_free(EA); tcg_temp_free_i64(xth); tcg_temp_free_i64(xtl); @@ -258,121 +248,13 @@ static void gen_lxvb16x(DisasContext *ctx) tcg_gen_qemu_ld_i64(xth, EA, ctx->mem_idx, MO_BEQ); tcg_gen_addi_tl(EA, EA, 8); 
tcg_gen_qemu_ld_i64(xtl, EA, ctx->mem_idx, MO_BEQ); - set_cpu_vsrh(xT(ctx->opcode), xth); - set_cpu_vsrl(xT(ctx->opcode), xtl); + set_cpu_vsr(xT(ctx->opcode), xth, true); + set_cpu_vsr(xT(ctx->opcode), xtl, false); tcg_temp_free(EA); tcg_temp_free_i64(xth); tcg_temp_free_i64(xtl); } -#define VSX_VECTOR_LOAD(name, op, indexed) \ -static void gen_##name(DisasContext *ctx) \ -{ \ - int xt; \ - TCGv EA; \ - TCGv_i64 xth; \ - TCGv_i64 xtl; \ - \ - if (indexed) { \ - xt = xT(ctx->opcode); \ - } else { \ - xt = DQxT(ctx->opcode); \ - } \ - \ - if (xt < 32) { \ - if (unlikely(!ctx->vsx_enabled)) { \ - gen_exception(ctx, POWERPC_EXCP_VSXU); \ - return; \ - } \ - } else { \ - if (unlikely(!ctx->altivec_enabled)) { \ - gen_exception(ctx, POWERPC_EXCP_VPU); \ - return; \ - } \ - } \ - xth = tcg_temp_new_i64(); \ - xtl = tcg_temp_new_i64(); \ - gen_set_access_type(ctx, ACCESS_INT); \ - EA = tcg_temp_new(); \ - if (indexed) { \ - gen_addr_reg_index(ctx, EA); \ - } else { \ - gen_addr_imm_index(ctx, EA, 0x0F); \ - } \ - if (ctx->le_mode) { \ - tcg_gen_qemu_##op(xtl, EA, ctx->mem_idx, MO_LEQ); \ - set_cpu_vsrl(xt, xtl); \ - tcg_gen_addi_tl(EA, EA, 8); \ - tcg_gen_qemu_##op(xth, EA, ctx->mem_idx, MO_LEQ); \ - set_cpu_vsrh(xt, xth); \ - } else { \ - tcg_gen_qemu_##op(xth, EA, ctx->mem_idx, MO_BEQ); \ - set_cpu_vsrh(xt, xth); \ - tcg_gen_addi_tl(EA, EA, 8); \ - tcg_gen_qemu_##op(xtl, EA, ctx->mem_idx, MO_BEQ); \ - set_cpu_vsrl(xt, xtl); \ - } \ - tcg_temp_free(EA); \ - tcg_temp_free_i64(xth); \ - tcg_temp_free_i64(xtl); \ -} - -VSX_VECTOR_LOAD(lxv, ld_i64, 0) -VSX_VECTOR_LOAD(lxvx, ld_i64, 1) - -#define VSX_VECTOR_STORE(name, op, indexed) \ -static void gen_##name(DisasContext *ctx) \ -{ \ - int xt; \ - TCGv EA; \ - TCGv_i64 xth; \ - TCGv_i64 xtl; \ - \ - if (indexed) { \ - xt = xT(ctx->opcode); \ - } else { \ - xt = DQxT(ctx->opcode); \ - } \ - \ - if (xt < 32) { \ - if (unlikely(!ctx->vsx_enabled)) { \ - gen_exception(ctx, POWERPC_EXCP_VSXU); \ - return; \ - } \ - } else { \ - if (unlikely(!ctx->altivec_enabled)) { \ - gen_exception(ctx, POWERPC_EXCP_VPU); \ - return; \ - } \ - } \ - xth = tcg_temp_new_i64(); \ - xtl = tcg_temp_new_i64(); \ - get_cpu_vsrh(xth, xt); \ - get_cpu_vsrl(xtl, xt); \ - gen_set_access_type(ctx, ACCESS_INT); \ - EA = tcg_temp_new(); \ - if (indexed) { \ - gen_addr_reg_index(ctx, EA); \ - } else { \ - gen_addr_imm_index(ctx, EA, 0x0F); \ - } \ - if (ctx->le_mode) { \ - tcg_gen_qemu_##op(xtl, EA, ctx->mem_idx, MO_LEQ); \ - tcg_gen_addi_tl(EA, EA, 8); \ - tcg_gen_qemu_##op(xth, EA, ctx->mem_idx, MO_LEQ); \ - } else { \ - tcg_gen_qemu_##op(xth, EA, ctx->mem_idx, MO_BEQ); \ - tcg_gen_addi_tl(EA, EA, 8); \ - tcg_gen_qemu_##op(xtl, EA, ctx->mem_idx, MO_BEQ); \ - } \ - tcg_temp_free(EA); \ - tcg_temp_free_i64(xth); \ - tcg_temp_free_i64(xtl); \ -} - -VSX_VECTOR_STORE(stxv, st_i64, 0) -VSX_VECTOR_STORE(stxvx, st_i64, 1) - #ifdef TARGET_PPC64 #define VSX_VECTOR_LOAD_STORE_LENGTH(name) \ static void gen_##name(DisasContext *ctx) \ @@ -421,7 +303,7 @@ static void gen_##name(DisasContext *ctx) \ EA = tcg_temp_new(); \ gen_addr_imm_index(ctx, EA, 0x03); \ gen_qemu_##operation(ctx, xth, EA); \ - set_cpu_vsrh(rD(ctx->opcode) + 32, xth); \ + set_cpu_vsr(rD(ctx->opcode) + 32, xth, true); \ /* NOTE: cpu_vsrl is undefined */ \ tcg_temp_free(EA); \ tcg_temp_free_i64(xth); \ @@ -443,7 +325,7 @@ static void gen_##name(DisasContext *ctx) \ gen_set_access_type(ctx, ACCESS_INT); \ EA = tcg_temp_new(); \ gen_addr_reg_index(ctx, EA); \ - get_cpu_vsrh(t0, xS(ctx->opcode)); \ + get_cpu_vsr(t0, xS(ctx->opcode), 
true); \ gen_qemu_##operation(ctx, t0, EA); \ tcg_temp_free(EA); \ tcg_temp_free_i64(t0); \ @@ -468,10 +350,10 @@ static void gen_stxvd2x(DisasContext *ctx) gen_set_access_type(ctx, ACCESS_INT); EA = tcg_temp_new(); gen_addr_reg_index(ctx, EA); - get_cpu_vsrh(t0, xS(ctx->opcode)); + get_cpu_vsr(t0, xS(ctx->opcode), true); gen_qemu_st64_i64(ctx, t0, EA); tcg_gen_addi_tl(EA, EA, 8); - get_cpu_vsrl(t0, xS(ctx->opcode)); + get_cpu_vsr(t0, xS(ctx->opcode), false); gen_qemu_st64_i64(ctx, t0, EA); tcg_temp_free(EA); tcg_temp_free_i64(t0); @@ -489,8 +371,8 @@ static void gen_stxvw4x(DisasContext *ctx) } xsh = tcg_temp_new_i64(); xsl = tcg_temp_new_i64(); - get_cpu_vsrh(xsh, xS(ctx->opcode)); - get_cpu_vsrl(xsl, xS(ctx->opcode)); + get_cpu_vsr(xsh, xS(ctx->opcode), true); + get_cpu_vsr(xsl, xS(ctx->opcode), false); gen_set_access_type(ctx, ACCESS_INT); EA = tcg_temp_new(); gen_addr_reg_index(ctx, EA); @@ -529,8 +411,8 @@ static void gen_stxvh8x(DisasContext *ctx) } xsh = tcg_temp_new_i64(); xsl = tcg_temp_new_i64(); - get_cpu_vsrh(xsh, xS(ctx->opcode)); - get_cpu_vsrl(xsl, xS(ctx->opcode)); + get_cpu_vsr(xsh, xS(ctx->opcode), true); + get_cpu_vsr(xsl, xS(ctx->opcode), false); gen_set_access_type(ctx, ACCESS_INT); EA = tcg_temp_new(); gen_addr_reg_index(ctx, EA); @@ -566,8 +448,8 @@ static void gen_stxvb16x(DisasContext *ctx) } xsh = tcg_temp_new_i64(); xsl = tcg_temp_new_i64(); - get_cpu_vsrh(xsh, xS(ctx->opcode)); - get_cpu_vsrl(xsl, xS(ctx->opcode)); + get_cpu_vsr(xsh, xS(ctx->opcode), true); + get_cpu_vsr(xsl, xS(ctx->opcode), false); gen_set_access_type(ctx, ACCESS_INT); EA = tcg_temp_new(); gen_addr_reg_index(ctx, EA); @@ -590,7 +472,7 @@ static void gen_##name(DisasContext *ctx) \ return; \ } \ xth = tcg_temp_new_i64(); \ - get_cpu_vsrh(xth, rD(ctx->opcode) + 32); \ + get_cpu_vsr(xth, rD(ctx->opcode) + 32, true); \ gen_set_access_type(ctx, ACCESS_INT); \ EA = tcg_temp_new(); \ gen_addr_imm_index(ctx, EA, 0x03); \ @@ -618,7 +500,7 @@ static void gen_mfvsrwz(DisasContext *ctx) } TCGv_i64 tmp = tcg_temp_new_i64(); TCGv_i64 xsh = tcg_temp_new_i64(); - get_cpu_vsrh(xsh, xS(ctx->opcode)); + get_cpu_vsr(xsh, xS(ctx->opcode), true); tcg_gen_ext32u_i64(tmp, xsh); tcg_gen_trunc_i64_tl(cpu_gpr[rA(ctx->opcode)], tmp); tcg_temp_free_i64(tmp); @@ -642,7 +524,7 @@ static void gen_mtvsrwa(DisasContext *ctx) TCGv_i64 xsh = tcg_temp_new_i64(); tcg_gen_extu_tl_i64(tmp, cpu_gpr[rA(ctx->opcode)]); tcg_gen_ext32s_i64(xsh, tmp); - set_cpu_vsrh(xT(ctx->opcode), xsh); + set_cpu_vsr(xT(ctx->opcode), xsh, true); tcg_temp_free_i64(tmp); tcg_temp_free_i64(xsh); } @@ -664,7 +546,7 @@ static void gen_mtvsrwz(DisasContext *ctx) TCGv_i64 xsh = tcg_temp_new_i64(); tcg_gen_extu_tl_i64(tmp, cpu_gpr[rA(ctx->opcode)]); tcg_gen_ext32u_i64(xsh, tmp); - set_cpu_vsrh(xT(ctx->opcode), xsh); + set_cpu_vsr(xT(ctx->opcode), xsh, true); tcg_temp_free_i64(tmp); tcg_temp_free_i64(xsh); } @@ -685,7 +567,7 @@ static void gen_mfvsrd(DisasContext *ctx) } } t0 = tcg_temp_new_i64(); - get_cpu_vsrh(t0, xS(ctx->opcode)); + get_cpu_vsr(t0, xS(ctx->opcode), true); tcg_gen_mov_i64(cpu_gpr[rA(ctx->opcode)], t0); tcg_temp_free_i64(t0); } @@ -706,7 +588,7 @@ static void gen_mtvsrd(DisasContext *ctx) } t0 = tcg_temp_new_i64(); tcg_gen_mov_i64(t0, cpu_gpr[rA(ctx->opcode)]); - set_cpu_vsrh(xT(ctx->opcode), t0); + set_cpu_vsr(xT(ctx->opcode), t0, true); tcg_temp_free_i64(t0); } @@ -725,7 +607,7 @@ static void gen_mfvsrld(DisasContext *ctx) } } t0 = tcg_temp_new_i64(); - get_cpu_vsrl(t0, xS(ctx->opcode)); + get_cpu_vsr(t0, xS(ctx->opcode), false); 
tcg_gen_mov_i64(cpu_gpr[rA(ctx->opcode)], t0); tcg_temp_free_i64(t0); } @@ -751,10 +633,10 @@ static void gen_mtvsrdd(DisasContext *ctx) } else { tcg_gen_mov_i64(t0, cpu_gpr[rA(ctx->opcode)]); } - set_cpu_vsrh(xT(ctx->opcode), t0); + set_cpu_vsr(xT(ctx->opcode), t0, true); tcg_gen_mov_i64(t0, cpu_gpr[rB(ctx->opcode)]); - set_cpu_vsrl(xT(ctx->opcode), t0); + set_cpu_vsr(xT(ctx->opcode), t0, false); tcg_temp_free_i64(t0); } @@ -776,8 +658,8 @@ static void gen_mtvsrws(DisasContext *ctx) t0 = tcg_temp_new_i64(); tcg_gen_deposit_i64(t0, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], 32, 32); - set_cpu_vsrl(xT(ctx->opcode), t0); - set_cpu_vsrh(xT(ctx->opcode), t0); + set_cpu_vsr(xT(ctx->opcode), t0, false); + set_cpu_vsr(xT(ctx->opcode), t0, true); tcg_temp_free_i64(t0); } @@ -797,33 +679,25 @@ static void gen_xxpermdi(DisasContext *ctx) if (unlikely((xT(ctx->opcode) == xA(ctx->opcode)) || (xT(ctx->opcode) == xB(ctx->opcode)))) { - if ((DM(ctx->opcode) & 2) == 0) { - get_cpu_vsrh(xh, xA(ctx->opcode)); - } else { - get_cpu_vsrl(xh, xA(ctx->opcode)); - } - if ((DM(ctx->opcode) & 1) == 0) { - get_cpu_vsrh(xl, xB(ctx->opcode)); - } else { - get_cpu_vsrl(xl, xB(ctx->opcode)); - } + get_cpu_vsr(xh, xA(ctx->opcode), (DM(ctx->opcode) & 2) == 0); + get_cpu_vsr(xl, xB(ctx->opcode), (DM(ctx->opcode) & 1) == 0); - set_cpu_vsrh(xT(ctx->opcode), xh); - set_cpu_vsrl(xT(ctx->opcode), xl); + set_cpu_vsr(xT(ctx->opcode), xh, true); + set_cpu_vsr(xT(ctx->opcode), xl, false); } else { if ((DM(ctx->opcode) & 2) == 0) { - get_cpu_vsrh(xh, xA(ctx->opcode)); - set_cpu_vsrh(xT(ctx->opcode), xh); + get_cpu_vsr(xh, xA(ctx->opcode), true); + set_cpu_vsr(xT(ctx->opcode), xh, true); } else { - get_cpu_vsrl(xh, xA(ctx->opcode)); - set_cpu_vsrh(xT(ctx->opcode), xh); + get_cpu_vsr(xh, xA(ctx->opcode), false); + set_cpu_vsr(xT(ctx->opcode), xh, true); } if ((DM(ctx->opcode) & 1) == 0) { - get_cpu_vsrh(xl, xB(ctx->opcode)); - set_cpu_vsrl(xT(ctx->opcode), xl); + get_cpu_vsr(xl, xB(ctx->opcode), true); + set_cpu_vsr(xT(ctx->opcode), xl, false); } else { - get_cpu_vsrl(xl, xB(ctx->opcode)); - set_cpu_vsrl(xT(ctx->opcode), xl); + get_cpu_vsr(xl, xB(ctx->opcode), false); + set_cpu_vsr(xT(ctx->opcode), xl, false); } } tcg_temp_free_i64(xh); @@ -847,7 +721,7 @@ static void glue(gen_, name)(DisasContext *ctx) \ } \ xb = tcg_temp_new_i64(); \ sgm = tcg_temp_new_i64(); \ - get_cpu_vsrh(xb, xB(ctx->opcode)); \ + get_cpu_vsr(xb, xB(ctx->opcode), true); \ tcg_gen_movi_i64(sgm, sgn_mask); \ switch (op) { \ case OP_ABS: { \ @@ -864,7 +738,7 @@ static void glue(gen_, name)(DisasContext *ctx) \ } \ case OP_CPSGN: { \ TCGv_i64 xa = tcg_temp_new_i64(); \ - get_cpu_vsrh(xa, xA(ctx->opcode)); \ + get_cpu_vsr(xa, xA(ctx->opcode), true); \ tcg_gen_and_i64(xa, xa, sgm); \ tcg_gen_andc_i64(xb, xb, sgm); \ tcg_gen_or_i64(xb, xb, xa); \ @@ -872,7 +746,7 @@ static void glue(gen_, name)(DisasContext *ctx) \ break; \ } \ } \ - set_cpu_vsrh(xT(ctx->opcode), xb); \ + set_cpu_vsr(xT(ctx->opcode), xb, true); \ tcg_temp_free_i64(xb); \ tcg_temp_free_i64(sgm); \ } @@ -898,8 +772,8 @@ static void glue(gen_, name)(DisasContext *ctx) \ xbl = tcg_temp_new_i64(); \ sgm = tcg_temp_new_i64(); \ tmp = tcg_temp_new_i64(); \ - get_cpu_vsrh(xbh, xb); \ - get_cpu_vsrl(xbl, xb); \ + get_cpu_vsr(xbh, xb, true); \ + get_cpu_vsr(xbl, xb, false); \ tcg_gen_movi_i64(sgm, sgn_mask); \ switch (op) { \ case OP_ABS: \ @@ -914,15 +788,15 @@ static void glue(gen_, name)(DisasContext *ctx) \ case OP_CPSGN: \ xah = tcg_temp_new_i64(); \ xa = rA(ctx->opcode) + 32; \ - get_cpu_vsrh(tmp, 
xa); \ + get_cpu_vsr(tmp, xa, true); \ tcg_gen_and_i64(xah, tmp, sgm); \ tcg_gen_andc_i64(xbh, xbh, sgm); \ tcg_gen_or_i64(xbh, xbh, xah); \ tcg_temp_free_i64(xah); \ break; \ } \ - set_cpu_vsrh(xt, xbh); \ - set_cpu_vsrl(xt, xbl); \ + set_cpu_vsr(xt, xbh, true); \ + set_cpu_vsr(xt, xbl, false); \ tcg_temp_free_i64(xbl); \ tcg_temp_free_i64(xbh); \ tcg_temp_free_i64(sgm); \ @@ -945,8 +819,8 @@ static void glue(gen_, name)(DisasContext *ctx) \ xbh = tcg_temp_new_i64(); \ xbl = tcg_temp_new_i64(); \ sgm = tcg_temp_new_i64(); \ - get_cpu_vsrh(xbh, xB(ctx->opcode)); \ - get_cpu_vsrl(xbl, xB(ctx->opcode)); \ + get_cpu_vsr(xbh, xB(ctx->opcode), true); \ + get_cpu_vsr(xbl, xB(ctx->opcode), false); \ tcg_gen_movi_i64(sgm, sgn_mask); \ switch (op) { \ case OP_ABS: { \ @@ -967,8 +841,8 @@ static void glue(gen_, name)(DisasContext *ctx) \ case OP_CPSGN: { \ TCGv_i64 xah = tcg_temp_new_i64(); \ TCGv_i64 xal = tcg_temp_new_i64(); \ - get_cpu_vsrh(xah, xA(ctx->opcode)); \ - get_cpu_vsrl(xal, xA(ctx->opcode)); \ + get_cpu_vsr(xah, xA(ctx->opcode), true); \ + get_cpu_vsr(xal, xA(ctx->opcode), false); \ tcg_gen_and_i64(xah, xah, sgm); \ tcg_gen_and_i64(xal, xal, sgm); \ tcg_gen_andc_i64(xbh, xbh, sgm); \ @@ -980,8 +854,8 @@ static void glue(gen_, name)(DisasContext *ctx) \ break; \ } \ } \ - set_cpu_vsrh(xT(ctx->opcode), xbh); \ - set_cpu_vsrl(xT(ctx->opcode), xbl); \ + set_cpu_vsr(xT(ctx->opcode), xbh, true); \ + set_cpu_vsr(xT(ctx->opcode), xbl, false); \ tcg_temp_free_i64(xbh); \ tcg_temp_free_i64(xbl); \ tcg_temp_free_i64(sgm); \ @@ -1193,9 +1067,9 @@ static void gen_##name(DisasContext *ctx) \ } \ t0 = tcg_temp_new_i64(); \ t1 = tcg_temp_new_i64(); \ - get_cpu_vsrh(t0, xB(ctx->opcode)); \ + get_cpu_vsr(t0, xB(ctx->opcode), true); \ gen_helper_##name(t1, cpu_env, t0); \ - set_cpu_vsrh(xT(ctx->opcode), t1); \ + set_cpu_vsr(xT(ctx->opcode), t1, true); \ tcg_temp_free_i64(t0); \ tcg_temp_free_i64(t1); \ } @@ -1390,13 +1264,13 @@ static void gen_xxbrd(DisasContext *ctx) xtl = tcg_temp_new_i64(); xbh = tcg_temp_new_i64(); xbl = tcg_temp_new_i64(); - get_cpu_vsrh(xbh, xB(ctx->opcode)); - get_cpu_vsrl(xbl, xB(ctx->opcode)); + get_cpu_vsr(xbh, xB(ctx->opcode), true); + get_cpu_vsr(xbl, xB(ctx->opcode), false); tcg_gen_bswap64_i64(xth, xbh); tcg_gen_bswap64_i64(xtl, xbl); - set_cpu_vsrh(xT(ctx->opcode), xth); - set_cpu_vsrl(xT(ctx->opcode), xtl); + set_cpu_vsr(xT(ctx->opcode), xth, true); + set_cpu_vsr(xT(ctx->opcode), xtl, false); tcg_temp_free_i64(xth); tcg_temp_free_i64(xtl); @@ -1419,12 +1293,12 @@ static void gen_xxbrh(DisasContext *ctx) xtl = tcg_temp_new_i64(); xbh = tcg_temp_new_i64(); xbl = tcg_temp_new_i64(); - get_cpu_vsrh(xbh, xB(ctx->opcode)); - get_cpu_vsrl(xbl, xB(ctx->opcode)); + get_cpu_vsr(xbh, xB(ctx->opcode), true); + get_cpu_vsr(xbl, xB(ctx->opcode), false); gen_bswap16x8(xth, xtl, xbh, xbl); - set_cpu_vsrh(xT(ctx->opcode), xth); - set_cpu_vsrl(xT(ctx->opcode), xtl); + set_cpu_vsr(xT(ctx->opcode), xth, true); + set_cpu_vsr(xT(ctx->opcode), xtl, false); tcg_temp_free_i64(xth); tcg_temp_free_i64(xtl); @@ -1448,15 +1322,15 @@ static void gen_xxbrq(DisasContext *ctx) xtl = tcg_temp_new_i64(); xbh = tcg_temp_new_i64(); xbl = tcg_temp_new_i64(); - get_cpu_vsrh(xbh, xB(ctx->opcode)); - get_cpu_vsrl(xbl, xB(ctx->opcode)); + get_cpu_vsr(xbh, xB(ctx->opcode), true); + get_cpu_vsr(xbl, xB(ctx->opcode), false); t0 = tcg_temp_new_i64(); tcg_gen_bswap64_i64(t0, xbl); tcg_gen_bswap64_i64(xtl, xbh); - set_cpu_vsrl(xT(ctx->opcode), xtl); + set_cpu_vsr(xT(ctx->opcode), xtl, false); tcg_gen_mov_i64(xth, t0); 
- set_cpu_vsrh(xT(ctx->opcode), xth); + set_cpu_vsr(xT(ctx->opcode), xth, true); tcg_temp_free_i64(t0); tcg_temp_free_i64(xth); @@ -1480,12 +1354,12 @@ static void gen_xxbrw(DisasContext *ctx) xtl = tcg_temp_new_i64(); xbh = tcg_temp_new_i64(); xbl = tcg_temp_new_i64(); - get_cpu_vsrh(xbh, xB(ctx->opcode)); - get_cpu_vsrl(xbl, xB(ctx->opcode)); + get_cpu_vsr(xbh, xB(ctx->opcode), true); + get_cpu_vsr(xbl, xB(ctx->opcode), false); gen_bswap32x4(xth, xtl, xbh, xbl); - set_cpu_vsrh(xT(ctx->opcode), xth); - set_cpu_vsrl(xT(ctx->opcode), xtl); + set_cpu_vsr(xT(ctx->opcode), xth, true); + set_cpu_vsr(xT(ctx->opcode), xtl, false); tcg_temp_free_i64(xth); tcg_temp_free_i64(xtl); @@ -1527,23 +1401,16 @@ static void glue(gen_, name)(DisasContext *ctx) \ b0 = tcg_temp_new_i64(); \ b1 = tcg_temp_new_i64(); \ tmp = tcg_temp_new_i64(); \ - if (high) { \ - get_cpu_vsrh(a0, xA(ctx->opcode)); \ - get_cpu_vsrh(a1, xA(ctx->opcode)); \ - get_cpu_vsrh(b0, xB(ctx->opcode)); \ - get_cpu_vsrh(b1, xB(ctx->opcode)); \ - } else { \ - get_cpu_vsrl(a0, xA(ctx->opcode)); \ - get_cpu_vsrl(a1, xA(ctx->opcode)); \ - get_cpu_vsrl(b0, xB(ctx->opcode)); \ - get_cpu_vsrl(b1, xB(ctx->opcode)); \ - } \ + get_cpu_vsr(a0, xA(ctx->opcode), high); \ + get_cpu_vsr(a1, xA(ctx->opcode), high); \ + get_cpu_vsr(b0, xB(ctx->opcode), high); \ + get_cpu_vsr(b1, xB(ctx->opcode), high); \ tcg_gen_shri_i64(a0, a0, 32); \ tcg_gen_shri_i64(b0, b0, 32); \ tcg_gen_deposit_i64(tmp, b0, a0, 32, 32); \ - set_cpu_vsrh(xT(ctx->opcode), tmp); \ + set_cpu_vsr(xT(ctx->opcode), tmp, true); \ tcg_gen_deposit_i64(tmp, b1, a1, 32, 32); \ - set_cpu_vsrl(xT(ctx->opcode), tmp); \ + set_cpu_vsr(xT(ctx->opcode), tmp, false); \ tcg_temp_free_i64(a0); \ tcg_temp_free_i64(a1); \ tcg_temp_free_i64(b0); \ @@ -1569,47 +1436,114 @@ static void gen_xxsel(DisasContext *ctx) vsr_full_offset(rb), vsr_full_offset(ra), 16, 16); } -static void gen_xxspltw(DisasContext *ctx) +static bool trans_XXSPLTW(DisasContext *ctx, arg_XX2 *a) { - int rt = xT(ctx->opcode); - int rb = xB(ctx->opcode); - int uim = UIM(ctx->opcode); int tofs, bofs; - if (unlikely(!ctx->vsx_enabled)) { - gen_exception(ctx, POWERPC_EXCP_VSXU); - return; - } + REQUIRE_VSX(ctx); - tofs = vsr_full_offset(rt); - bofs = vsr_full_offset(rb); - bofs += uim << MO_32; + tofs = vsr_full_offset(a->xt); + bofs = vsr_full_offset(a->xb); + bofs += a->uim << MO_32; #ifndef HOST_WORDS_BIG_ENDIAN bofs ^= 8 | 4; #endif tcg_gen_gvec_dup_mem(MO_32, tofs, bofs, 16, 16); + return true; } #define pattern(x) (((x) & 0xff) * (~(uint64_t)0 / 0xff)) -static void gen_xxspltib(DisasContext *ctx) +static bool trans_XXSPLTIB(DisasContext *ctx, arg_X_imm8 *a) { - uint8_t uim8 = IMM8(ctx->opcode); - int rt = xT(ctx->opcode); - - if (rt < 32) { - if (unlikely(!ctx->vsx_enabled)) { - gen_exception(ctx, POWERPC_EXCP_VSXU); - return; - } + if (a->xt < 32) { + REQUIRE_VSX(ctx); } else { - if (unlikely(!ctx->altivec_enabled)) { - gen_exception(ctx, POWERPC_EXCP_VPU); - return; - } + REQUIRE_VECTOR(ctx); } - tcg_gen_gvec_dup_imm(MO_8, vsr_full_offset(rt), 16, 16, uim8); + tcg_gen_gvec_dup_imm(MO_8, vsr_full_offset(a->xt), 16, 16, a->imm); + return true; +} + +static bool trans_XXSPLTIW(DisasContext *ctx, arg_8RR_D *a) +{ + REQUIRE_INSNS_FLAGS2(ctx, ISA310); + REQUIRE_VSX(ctx); + + tcg_gen_gvec_dup_imm(MO_32, vsr_full_offset(a->xt), 16, 16, a->si); + + return true; +} + +static bool trans_XXSPLTIDP(DisasContext *ctx, arg_8RR_D *a) +{ + REQUIRE_INSNS_FLAGS2(ctx, ISA310); + REQUIRE_VSX(ctx); + + tcg_gen_gvec_dup_imm(MO_64, vsr_full_offset(a->xt), 16, 
16, + helper_todouble(a->si)); + return true; +} + +static bool trans_XXSPLTI32DX(DisasContext *ctx, arg_8RR_D_IX *a) +{ + TCGv_i32 imm; + + REQUIRE_INSNS_FLAGS2(ctx, ISA310); + REQUIRE_VSX(ctx); + + imm = tcg_constant_i32(a->si); + + tcg_gen_st_i32(imm, cpu_env, + offsetof(CPUPPCState, vsr[a->xt].VsrW(0 + a->ix))); + tcg_gen_st_i32(imm, cpu_env, + offsetof(CPUPPCState, vsr[a->xt].VsrW(2 + a->ix))); + + return true; +} + +static bool trans_LXVKQ(DisasContext *ctx, arg_X_uim5 *a) +{ + static const uint64_t values[32] = { + 0, /* Unspecified */ + 0x3FFF000000000000llu, /* QP +1.0 */ + 0x4000000000000000llu, /* QP +2.0 */ + 0x4000800000000000llu, /* QP +3.0 */ + 0x4001000000000000llu, /* QP +4.0 */ + 0x4001400000000000llu, /* QP +5.0 */ + 0x4001800000000000llu, /* QP +6.0 */ + 0x4001C00000000000llu, /* QP +7.0 */ + 0x7FFF000000000000llu, /* QP +Inf */ + 0x7FFF800000000000llu, /* QP dQNaN */ + 0, /* Unspecified */ + 0, /* Unspecified */ + 0, /* Unspecified */ + 0, /* Unspecified */ + 0, /* Unspecified */ + 0, /* Unspecified */ + 0x8000000000000000llu, /* QP -0.0 */ + 0xBFFF000000000000llu, /* QP -1.0 */ + 0xC000000000000000llu, /* QP -2.0 */ + 0xC000800000000000llu, /* QP -3.0 */ + 0xC001000000000000llu, /* QP -4.0 */ + 0xC001400000000000llu, /* QP -5.0 */ + 0xC001800000000000llu, /* QP -6.0 */ + 0xC001C00000000000llu, /* QP -7.0 */ + 0xFFFF000000000000llu, /* QP -Inf */ + }; + + REQUIRE_INSNS_FLAGS2(ctx, ISA310); + REQUIRE_VSX(ctx); + + if (values[a->uim]) { + set_cpu_vsr(a->xt, tcg_constant_i64(0x0), false); + set_cpu_vsr(a->xt, tcg_constant_i64(values[a->uim]), true); + } else { + gen_invalid(ctx); + } + + return true; } static void gen_xxsldwi(DisasContext *ctx) @@ -1624,40 +1558,40 @@ static void gen_xxsldwi(DisasContext *ctx) switch (SHW(ctx->opcode)) { case 0: { - get_cpu_vsrh(xth, xA(ctx->opcode)); - get_cpu_vsrl(xtl, xA(ctx->opcode)); + get_cpu_vsr(xth, xA(ctx->opcode), true); + get_cpu_vsr(xtl, xA(ctx->opcode), false); break; } case 1: { TCGv_i64 t0 = tcg_temp_new_i64(); - get_cpu_vsrh(xth, xA(ctx->opcode)); + get_cpu_vsr(xth, xA(ctx->opcode), true); tcg_gen_shli_i64(xth, xth, 32); - get_cpu_vsrl(t0, xA(ctx->opcode)); + get_cpu_vsr(t0, xA(ctx->opcode), false); tcg_gen_shri_i64(t0, t0, 32); tcg_gen_or_i64(xth, xth, t0); - get_cpu_vsrl(xtl, xA(ctx->opcode)); + get_cpu_vsr(xtl, xA(ctx->opcode), false); tcg_gen_shli_i64(xtl, xtl, 32); - get_cpu_vsrh(t0, xB(ctx->opcode)); + get_cpu_vsr(t0, xB(ctx->opcode), true); tcg_gen_shri_i64(t0, t0, 32); tcg_gen_or_i64(xtl, xtl, t0); tcg_temp_free_i64(t0); break; } case 2: { - get_cpu_vsrl(xth, xA(ctx->opcode)); - get_cpu_vsrh(xtl, xB(ctx->opcode)); + get_cpu_vsr(xth, xA(ctx->opcode), false); + get_cpu_vsr(xtl, xB(ctx->opcode), true); break; } case 3: { TCGv_i64 t0 = tcg_temp_new_i64(); - get_cpu_vsrl(xth, xA(ctx->opcode)); + get_cpu_vsr(xth, xA(ctx->opcode), false); tcg_gen_shli_i64(xth, xth, 32); - get_cpu_vsrh(t0, xB(ctx->opcode)); + get_cpu_vsr(t0, xB(ctx->opcode), true); tcg_gen_shri_i64(t0, t0, 32); tcg_gen_or_i64(xth, xth, t0); - get_cpu_vsrh(xtl, xB(ctx->opcode)); + get_cpu_vsr(xtl, xB(ctx->opcode), true); tcg_gen_shli_i64(xtl, xtl, 32); - get_cpu_vsrl(t0, xB(ctx->opcode)); + get_cpu_vsr(t0, xB(ctx->opcode), false); tcg_gen_shri_i64(t0, t0, 32); tcg_gen_or_i64(xtl, xtl, t0); tcg_temp_free_i64(t0); @@ -1665,8 +1599,8 @@ static void gen_xxsldwi(DisasContext *ctx) } } - set_cpu_vsrh(xT(ctx->opcode), xth); - set_cpu_vsrl(xT(ctx->opcode), xtl); + set_cpu_vsr(xT(ctx->opcode), xth, true); + set_cpu_vsr(xT(ctx->opcode), xtl, false); 
tcg_temp_free_i64(xth); tcg_temp_free_i64(xtl); @@ -1694,8 +1628,8 @@ static void gen_##name(DisasContext *ctx) \ */ \ if (uimm > 15) { \ tcg_gen_movi_i64(t1, 0); \ - set_cpu_vsrh(xT(ctx->opcode), t1); \ - set_cpu_vsrl(xT(ctx->opcode), t1); \ + set_cpu_vsr(xT(ctx->opcode), t1, true); \ + set_cpu_vsr(xT(ctx->opcode), t1, false); \ return; \ } \ tcg_gen_movi_i32(t0, uimm); \ @@ -1719,7 +1653,7 @@ static void gen_xsxexpdp(DisasContext *ctx) return; } t0 = tcg_temp_new_i64(); - get_cpu_vsrh(t0, xB(ctx->opcode)); + get_cpu_vsr(t0, xB(ctx->opcode), true); tcg_gen_extract_i64(rt, t0, 52, 11); tcg_temp_free_i64(t0); } @@ -1737,12 +1671,12 @@ static void gen_xsxexpqp(DisasContext *ctx) xth = tcg_temp_new_i64(); xtl = tcg_temp_new_i64(); xbh = tcg_temp_new_i64(); - get_cpu_vsrh(xbh, rB(ctx->opcode) + 32); + get_cpu_vsr(xbh, rB(ctx->opcode) + 32, true); tcg_gen_extract_i64(xth, xbh, 48, 15); - set_cpu_vsrh(rD(ctx->opcode) + 32, xth); + set_cpu_vsr(rD(ctx->opcode) + 32, xth, true); tcg_gen_movi_i64(xtl, 0); - set_cpu_vsrl(rD(ctx->opcode) + 32, xtl); + set_cpu_vsr(rD(ctx->opcode) + 32, xtl, false); tcg_temp_free_i64(xbh); tcg_temp_free_i64(xth); @@ -1766,7 +1700,7 @@ static void gen_xsiexpdp(DisasContext *ctx) tcg_gen_andi_i64(t0, rb, 0x7FF); tcg_gen_shli_i64(t0, t0, 52); tcg_gen_or_i64(xth, xth, t0); - set_cpu_vsrh(xT(ctx->opcode), xth); + set_cpu_vsr(xT(ctx->opcode), xth, true); /* dword[1] is undefined */ tcg_temp_free_i64(t0); tcg_temp_free_i64(xth); @@ -1789,19 +1723,19 @@ static void gen_xsiexpqp(DisasContext *ctx) xtl = tcg_temp_new_i64(); xah = tcg_temp_new_i64(); xal = tcg_temp_new_i64(); - get_cpu_vsrh(xah, rA(ctx->opcode) + 32); - get_cpu_vsrl(xal, rA(ctx->opcode) + 32); + get_cpu_vsr(xah, rA(ctx->opcode) + 32, true); + get_cpu_vsr(xal, rA(ctx->opcode) + 32, false); xbh = tcg_temp_new_i64(); - get_cpu_vsrh(xbh, rB(ctx->opcode) + 32); + get_cpu_vsr(xbh, rB(ctx->opcode) + 32, true); t0 = tcg_temp_new_i64(); tcg_gen_andi_i64(xth, xah, 0x8000FFFFFFFFFFFF); tcg_gen_andi_i64(t0, xbh, 0x7FFF); tcg_gen_shli_i64(t0, t0, 48); tcg_gen_or_i64(xth, xth, t0); - set_cpu_vsrh(rD(ctx->opcode) + 32, xth); + set_cpu_vsr(rD(ctx->opcode) + 32, xth, true); tcg_gen_mov_i64(xtl, xal); - set_cpu_vsrl(rD(ctx->opcode) + 32, xtl); + set_cpu_vsr(rD(ctx->opcode) + 32, xtl, false); tcg_temp_free_i64(t0); tcg_temp_free_i64(xth); @@ -1826,12 +1760,12 @@ static void gen_xsxsigdp(DisasContext *ctx) zr = tcg_const_i64(0); nan = tcg_const_i64(2047); - get_cpu_vsrh(t1, xB(ctx->opcode)); + get_cpu_vsr(t1, xB(ctx->opcode), true); tcg_gen_extract_i64(exp, t1, 52, 11); tcg_gen_movi_i64(t0, 0x0010000000000000); tcg_gen_movcond_i64(TCG_COND_EQ, t0, exp, zr, zr, t0); tcg_gen_movcond_i64(TCG_COND_EQ, t0, exp, nan, zr, t0); - get_cpu_vsrh(t1, xB(ctx->opcode)); + get_cpu_vsr(t1, xB(ctx->opcode), true); tcg_gen_deposit_i64(rt, t0, t1, 0, 52); tcg_temp_free_i64(t0); @@ -1857,8 +1791,8 @@ static void gen_xsxsigqp(DisasContext *ctx) xtl = tcg_temp_new_i64(); xbh = tcg_temp_new_i64(); xbl = tcg_temp_new_i64(); - get_cpu_vsrh(xbh, rB(ctx->opcode) + 32); - get_cpu_vsrl(xbl, rB(ctx->opcode) + 32); + get_cpu_vsr(xbh, rB(ctx->opcode) + 32, true); + get_cpu_vsr(xbl, rB(ctx->opcode) + 32, false); exp = tcg_temp_new_i64(); t0 = tcg_temp_new_i64(); zr = tcg_const_i64(0); @@ -1869,9 +1803,9 @@ static void gen_xsxsigqp(DisasContext *ctx) tcg_gen_movcond_i64(TCG_COND_EQ, t0, exp, zr, zr, t0); tcg_gen_movcond_i64(TCG_COND_EQ, t0, exp, nan, zr, t0); tcg_gen_deposit_i64(xth, t0, xbh, 0, 48); - set_cpu_vsrh(rD(ctx->opcode) + 32, xth); + 
set_cpu_vsr(rD(ctx->opcode) + 32, xth, true); tcg_gen_mov_i64(xtl, xbl); - set_cpu_vsrl(rD(ctx->opcode) + 32, xtl); + set_cpu_vsr(rD(ctx->opcode) + 32, xtl, false); tcg_temp_free_i64(t0); tcg_temp_free_i64(exp); @@ -1904,22 +1838,22 @@ static void gen_xviexpsp(DisasContext *ctx) xal = tcg_temp_new_i64(); xbh = tcg_temp_new_i64(); xbl = tcg_temp_new_i64(); - get_cpu_vsrh(xah, xA(ctx->opcode)); - get_cpu_vsrl(xal, xA(ctx->opcode)); - get_cpu_vsrh(xbh, xB(ctx->opcode)); - get_cpu_vsrl(xbl, xB(ctx->opcode)); + get_cpu_vsr(xah, xA(ctx->opcode), true); + get_cpu_vsr(xal, xA(ctx->opcode), false); + get_cpu_vsr(xbh, xB(ctx->opcode), true); + get_cpu_vsr(xbl, xB(ctx->opcode), false); t0 = tcg_temp_new_i64(); tcg_gen_andi_i64(xth, xah, 0x807FFFFF807FFFFF); tcg_gen_andi_i64(t0, xbh, 0xFF000000FF); tcg_gen_shli_i64(t0, t0, 23); tcg_gen_or_i64(xth, xth, t0); - set_cpu_vsrh(xT(ctx->opcode), xth); + set_cpu_vsr(xT(ctx->opcode), xth, true); tcg_gen_andi_i64(xtl, xal, 0x807FFFFF807FFFFF); tcg_gen_andi_i64(t0, xbl, 0xFF000000FF); tcg_gen_shli_i64(t0, t0, 23); tcg_gen_or_i64(xtl, xtl, t0); - set_cpu_vsrl(xT(ctx->opcode), xtl); + set_cpu_vsr(xT(ctx->opcode), xtl, false); tcg_temp_free_i64(t0); tcg_temp_free_i64(xth); @@ -1949,16 +1883,16 @@ static void gen_xviexpdp(DisasContext *ctx) xal = tcg_temp_new_i64(); xbh = tcg_temp_new_i64(); xbl = tcg_temp_new_i64(); - get_cpu_vsrh(xah, xA(ctx->opcode)); - get_cpu_vsrl(xal, xA(ctx->opcode)); - get_cpu_vsrh(xbh, xB(ctx->opcode)); - get_cpu_vsrl(xbl, xB(ctx->opcode)); + get_cpu_vsr(xah, xA(ctx->opcode), true); + get_cpu_vsr(xal, xA(ctx->opcode), false); + get_cpu_vsr(xbh, xB(ctx->opcode), true); + get_cpu_vsr(xbl, xB(ctx->opcode), false); tcg_gen_deposit_i64(xth, xah, xbh, 52, 11); - set_cpu_vsrh(xT(ctx->opcode), xth); + set_cpu_vsr(xT(ctx->opcode), xth, true); tcg_gen_deposit_i64(xtl, xal, xbl, 52, 11); - set_cpu_vsrl(xT(ctx->opcode), xtl); + set_cpu_vsr(xT(ctx->opcode), xtl, false); tcg_temp_free_i64(xth); tcg_temp_free_i64(xtl); @@ -1983,15 +1917,15 @@ static void gen_xvxexpsp(DisasContext *ctx) xtl = tcg_temp_new_i64(); xbh = tcg_temp_new_i64(); xbl = tcg_temp_new_i64(); - get_cpu_vsrh(xbh, xB(ctx->opcode)); - get_cpu_vsrl(xbl, xB(ctx->opcode)); + get_cpu_vsr(xbh, xB(ctx->opcode), true); + get_cpu_vsr(xbl, xB(ctx->opcode), false); tcg_gen_shri_i64(xth, xbh, 23); tcg_gen_andi_i64(xth, xth, 0xFF000000FF); - set_cpu_vsrh(xT(ctx->opcode), xth); + set_cpu_vsr(xT(ctx->opcode), xth, true); tcg_gen_shri_i64(xtl, xbl, 23); tcg_gen_andi_i64(xtl, xtl, 0xFF000000FF); - set_cpu_vsrl(xT(ctx->opcode), xtl); + set_cpu_vsr(xT(ctx->opcode), xtl, false); tcg_temp_free_i64(xth); tcg_temp_free_i64(xtl); @@ -2014,13 +1948,13 @@ static void gen_xvxexpdp(DisasContext *ctx) xtl = tcg_temp_new_i64(); xbh = tcg_temp_new_i64(); xbl = tcg_temp_new_i64(); - get_cpu_vsrh(xbh, xB(ctx->opcode)); - get_cpu_vsrl(xbl, xB(ctx->opcode)); + get_cpu_vsr(xbh, xB(ctx->opcode), true); + get_cpu_vsr(xbl, xB(ctx->opcode), false); tcg_gen_extract_i64(xth, xbh, 52, 11); - set_cpu_vsrh(xT(ctx->opcode), xth); + set_cpu_vsr(xT(ctx->opcode), xth, true); tcg_gen_extract_i64(xtl, xbl, 52, 11); - set_cpu_vsrl(xT(ctx->opcode), xtl); + set_cpu_vsr(xT(ctx->opcode), xtl, false); tcg_temp_free_i64(xth); tcg_temp_free_i64(xtl); @@ -2046,8 +1980,8 @@ static void gen_xvxsigdp(DisasContext *ctx) xtl = tcg_temp_new_i64(); xbh = tcg_temp_new_i64(); xbl = tcg_temp_new_i64(); - get_cpu_vsrh(xbh, xB(ctx->opcode)); - get_cpu_vsrl(xbl, xB(ctx->opcode)); + get_cpu_vsr(xbh, xB(ctx->opcode), true); + get_cpu_vsr(xbl, xB(ctx->opcode), 
false); exp = tcg_temp_new_i64(); t0 = tcg_temp_new_i64(); zr = tcg_const_i64(0); @@ -2058,14 +1992,14 @@ static void gen_xvxsigdp(DisasContext *ctx) tcg_gen_movcond_i64(TCG_COND_EQ, t0, exp, zr, zr, t0); tcg_gen_movcond_i64(TCG_COND_EQ, t0, exp, nan, zr, t0); tcg_gen_deposit_i64(xth, t0, xbh, 0, 52); - set_cpu_vsrh(xT(ctx->opcode), xth); + set_cpu_vsr(xT(ctx->opcode), xth, true); tcg_gen_extract_i64(exp, xbl, 52, 11); tcg_gen_movi_i64(t0, 0x0010000000000000); tcg_gen_movcond_i64(TCG_COND_EQ, t0, exp, zr, zr, t0); tcg_gen_movcond_i64(TCG_COND_EQ, t0, exp, nan, zr, t0); tcg_gen_deposit_i64(xtl, t0, xbl, 0, 52); - set_cpu_vsrl(xT(ctx->opcode), xtl); + set_cpu_vsr(xT(ctx->opcode), xtl, false); tcg_temp_free_i64(t0); tcg_temp_free_i64(exp); @@ -2077,6 +2011,180 @@ static void gen_xvxsigdp(DisasContext *ctx) tcg_temp_free_i64(xbl); } +static bool do_lstxv(DisasContext *ctx, int ra, TCGv displ, + int rt, bool store, bool paired) +{ + TCGv ea; + TCGv_i64 xt; + MemOp mop; + int rt1, rt2; + + xt = tcg_temp_new_i64(); + + mop = DEF_MEMOP(MO_Q); + + gen_set_access_type(ctx, ACCESS_INT); + ea = do_ea_calc(ctx, ra, displ); + + if (paired && ctx->le_mode) { + rt1 = rt + 1; + rt2 = rt; + } else { + rt1 = rt; + rt2 = rt + 1; + } + + if (store) { + get_cpu_vsr(xt, rt1, !ctx->le_mode); + tcg_gen_qemu_st_i64(xt, ea, ctx->mem_idx, mop); + gen_addr_add(ctx, ea, ea, 8); + get_cpu_vsr(xt, rt1, ctx->le_mode); + tcg_gen_qemu_st_i64(xt, ea, ctx->mem_idx, mop); + if (paired) { + gen_addr_add(ctx, ea, ea, 8); + get_cpu_vsr(xt, rt2, !ctx->le_mode); + tcg_gen_qemu_st_i64(xt, ea, ctx->mem_idx, mop); + gen_addr_add(ctx, ea, ea, 8); + get_cpu_vsr(xt, rt2, ctx->le_mode); + tcg_gen_qemu_st_i64(xt, ea, ctx->mem_idx, mop); + } + } else { + tcg_gen_qemu_ld_i64(xt, ea, ctx->mem_idx, mop); + set_cpu_vsr(rt1, xt, !ctx->le_mode); + gen_addr_add(ctx, ea, ea, 8); + tcg_gen_qemu_ld_i64(xt, ea, ctx->mem_idx, mop); + set_cpu_vsr(rt1, xt, ctx->le_mode); + if (paired) { + gen_addr_add(ctx, ea, ea, 8); + tcg_gen_qemu_ld_i64(xt, ea, ctx->mem_idx, mop); + set_cpu_vsr(rt2, xt, !ctx->le_mode); + gen_addr_add(ctx, ea, ea, 8); + tcg_gen_qemu_ld_i64(xt, ea, ctx->mem_idx, mop); + set_cpu_vsr(rt2, xt, ctx->le_mode); + } + } + + tcg_temp_free(ea); + tcg_temp_free_i64(xt); + return true; +} + +static bool do_lstxv_D(DisasContext *ctx, arg_D *a, bool store, bool paired) +{ + if (paired) { + REQUIRE_INSNS_FLAGS2(ctx, ISA310); + } else { + REQUIRE_INSNS_FLAGS2(ctx, ISA300); + } + + if (paired || a->rt >= 32) { + REQUIRE_VSX(ctx); + } else { + REQUIRE_VECTOR(ctx); + } + + return do_lstxv(ctx, a->ra, tcg_constant_tl(a->si), a->rt, store, paired); +} + +static bool do_lstxv_PLS_D(DisasContext *ctx, arg_PLS_D *a, + bool store, bool paired) +{ + arg_D d; + REQUIRE_INSNS_FLAGS2(ctx, ISA310); + REQUIRE_VSX(ctx); + + if (!resolve_PLS_D(ctx, &d, a)) { + return true; + } + + return do_lstxv(ctx, d.ra, tcg_constant_tl(d.si), d.rt, store, paired); +} + +static bool do_lstxv_X(DisasContext *ctx, arg_X *a, bool store, bool paired) +{ + if (paired) { + REQUIRE_INSNS_FLAGS2(ctx, ISA310); + } else { + REQUIRE_INSNS_FLAGS2(ctx, ISA300); + } + + if (paired || a->rt >= 32) { + REQUIRE_VSX(ctx); + } else { + REQUIRE_VECTOR(ctx); + } + + return do_lstxv(ctx, a->ra, cpu_gpr[a->rb], a->rt, store, paired); +} + +TRANS(STXV, do_lstxv_D, true, false) +TRANS(LXV, do_lstxv_D, false, false) +TRANS(STXVP, do_lstxv_D, true, true) +TRANS(LXVP, do_lstxv_D, false, true) +TRANS(STXVX, do_lstxv_X, true, false) +TRANS(LXVX, do_lstxv_X, false, false) +TRANS(STXVPX, do_lstxv_X, true, true) 
+TRANS(LXVPX, do_lstxv_X, false, true) +TRANS64(PSTXV, do_lstxv_PLS_D, true, false) +TRANS64(PLXV, do_lstxv_PLS_D, false, false) +TRANS64(PSTXVP, do_lstxv_PLS_D, true, true) +TRANS64(PLXVP, do_lstxv_PLS_D, false, true) + +static void gen_xxblendv_vec(unsigned vece, TCGv_vec t, TCGv_vec a, TCGv_vec b, + TCGv_vec c) +{ + TCGv_vec tmp = tcg_temp_new_vec_matching(c); + tcg_gen_sari_vec(vece, tmp, c, (8 << vece) - 1); + tcg_gen_bitsel_vec(vece, t, tmp, b, a); + tcg_temp_free_vec(tmp); +} + +static bool do_xxblendv(DisasContext *ctx, arg_XX4 *a, unsigned vece) +{ + static const TCGOpcode vecop_list[] = { + INDEX_op_sari_vec, 0 + }; + static const GVecGen4 ops[4] = { + { + .fniv = gen_xxblendv_vec, + .fno = gen_helper_XXBLENDVB, + .opt_opc = vecop_list, + .vece = MO_8 + }, + { + .fniv = gen_xxblendv_vec, + .fno = gen_helper_XXBLENDVH, + .opt_opc = vecop_list, + .vece = MO_16 + }, + { + .fniv = gen_xxblendv_vec, + .fno = gen_helper_XXBLENDVW, + .opt_opc = vecop_list, + .vece = MO_32 + }, + { + .fniv = gen_xxblendv_vec, + .fno = gen_helper_XXBLENDVD, + .opt_opc = vecop_list, + .vece = MO_64 + } + }; + + REQUIRE_VSX(ctx); + + tcg_gen_gvec_4(vsr_full_offset(a->xt), vsr_full_offset(a->xa), + vsr_full_offset(a->xb), vsr_full_offset(a->xc), + 16, 16, &ops[vece]); + + return true; +} + +TRANS(XXBLENDVB, do_xxblendv, MO_8) +TRANS(XXBLENDVH, do_xxblendv, MO_16) +TRANS(XXBLENDVW, do_xxblendv, MO_32) +TRANS(XXBLENDVD, do_xxblendv, MO_64) + #undef GEN_XX2FORM #undef GEN_XX3FORM #undef GEN_XX2IFORM diff --git a/target/ppc/translate/vsx-ops.c.inc b/target/ppc/translate/vsx-ops.c.inc index 1d41beef26..152d1e5c3b 100644 --- a/target/ppc/translate/vsx-ops.c.inc +++ b/target/ppc/translate/vsx-ops.c.inc @@ -10,7 +10,6 @@ GEN_HANDLER_E(lxvdsx, 0x1F, 0x0C, 0x0A, 0, PPC_NONE, PPC2_VSX), GEN_HANDLER_E(lxvw4x, 0x1F, 0x0C, 0x18, 0, PPC_NONE, PPC2_VSX), GEN_HANDLER_E(lxvh8x, 0x1F, 0x0C, 0x19, 0, PPC_NONE, PPC2_ISA300), GEN_HANDLER_E(lxvb16x, 0x1F, 0x0C, 0x1B, 0, PPC_NONE, PPC2_ISA300), -GEN_HANDLER_E(lxvx, 0x1F, 0x0C, 0x08, 0x00000040, PPC_NONE, PPC2_ISA300), #if defined(TARGET_PPC64) GEN_HANDLER_E(lxvl, 0x1F, 0x0D, 0x08, 0, PPC_NONE, PPC2_ISA300), GEN_HANDLER_E(lxvll, 0x1F, 0x0D, 0x09, 0, PPC_NONE, PPC2_ISA300), @@ -25,7 +24,6 @@ GEN_HANDLER_E(stxvd2x, 0x1F, 0xC, 0x1E, 0, PPC_NONE, PPC2_VSX), GEN_HANDLER_E(stxvw4x, 0x1F, 0xC, 0x1C, 0, PPC_NONE, PPC2_VSX), GEN_HANDLER_E(stxvh8x, 0x1F, 0x0C, 0x1D, 0, PPC_NONE, PPC2_ISA300), GEN_HANDLER_E(stxvb16x, 0x1F, 0x0C, 0x1F, 0, PPC_NONE, PPC2_ISA300), -GEN_HANDLER_E(stxvx, 0x1F, 0x0C, 0x0C, 0, PPC_NONE, PPC2_ISA300), #if defined(TARGET_PPC64) GEN_HANDLER_E(stxvl, 0x1F, 0x0D, 0x0C, 0, PPC_NONE, PPC2_ISA300), GEN_HANDLER_E(stxvll, 0x1F, 0x0D, 0x0D, 0, PPC_NONE, PPC2_ISA300), @@ -350,8 +348,6 @@ GEN_XX3FORM(xxmrghw, 0x08, 0x02, PPC2_VSX), GEN_XX3FORM(xxmrglw, 0x08, 0x06, PPC2_VSX), GEN_XX3FORM(xxperm, 0x08, 0x03, PPC2_ISA300), GEN_XX3FORM(xxpermr, 0x08, 0x07, PPC2_ISA300), -GEN_XX2FORM(xxspltw, 0x08, 0x0A, PPC2_VSX), -GEN_XX1FORM(xxspltib, 0x08, 0x0B, PPC2_ISA300), GEN_XX3FORM_DM(xxsldwi, 0x08, 0x00), GEN_XX2FORM_EXT(xxextractuw, 0x0A, 0x0A, PPC2_ISA300), GEN_XX2FORM_EXT(xxinsertw, 0x0A, 0x0B, PPC2_ISA300), diff --git a/target/ppc/user_only_helper.c b/target/ppc/user_only_helper.c index aa3f867596..7ff76f7a06 100644 --- a/target/ppc/user_only_helper.c +++ b/target/ppc/user_only_helper.c @@ -21,16 +21,23 @@ #include "qemu/osdep.h" #include "cpu.h" #include "exec/exec-all.h" +#include "internal.h" - -bool ppc_cpu_tlb_fill(CPUState *cs, vaddr address, int size, - MMUAccessType access_type, 
int mmu_idx, - bool probe, uintptr_t retaddr) +void ppc_cpu_record_sigsegv(CPUState *cs, vaddr address, + MMUAccessType access_type, + bool maperr, uintptr_t retaddr) { PowerPCCPU *cpu = POWERPC_CPU(cs); CPUPPCState *env = &cpu->env; int exception, error_code; + /* + * Both DSISR and the "trap number" (exception vector offset, + * looked up from exception_index) are present in the linux-user + * signal frame. + * FIXME: we don't actually populate the trap number properly. + * It would be easiest to fill in an env->trap value now. + */ if (access_type == MMU_INST_FETCH) { exception = POWERPC_EXCP_ISI; error_code = 0x40000000; diff --git a/target/riscv/bitmanip_helper.c b/target/riscv/bitmanip_helper.c index 5b2f795d03..f1b5e5549f 100644 --- a/target/riscv/bitmanip_helper.c +++ b/target/riscv/bitmanip_helper.c @@ -3,6 +3,7 @@ * * Copyright (c) 2020 Kito Cheng, kito.cheng@sifive.com * Copyright (c) 2020 Frank Chang, frank.chang@sifive.com + * Copyright (c) 2021 Philipp Tomsich, philipp.tomsich@vrull.eu * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -23,68 +24,28 @@ #include "exec/helper-proto.h" #include "tcg/tcg.h" -static const uint64_t adjacent_masks[] = { - dup_const(MO_8, 0x55), - dup_const(MO_8, 0x33), - dup_const(MO_8, 0x0f), - dup_const(MO_16, 0xff), - dup_const(MO_32, 0xffff), - UINT32_MAX -}; - -static inline target_ulong do_swap(target_ulong x, uint64_t mask, int shift) +target_ulong HELPER(clmul)(target_ulong rs1, target_ulong rs2) { - return ((x & mask) << shift) | ((x & ~mask) >> shift); -} + target_ulong result = 0; -static target_ulong do_grev(target_ulong rs1, - target_ulong rs2, - int bits) -{ - target_ulong x = rs1; - int i, shift; - - for (i = 0, shift = 1; shift < bits; i++, shift <<= 1) { - if (rs2 & shift) { - x = do_swap(x, adjacent_masks[i], shift); + for (int i = 0; i < TARGET_LONG_BITS; i++) { + if ((rs2 >> i) & 1) { + result ^= (rs1 << i); } } - return x; + return result; } -target_ulong HELPER(grev)(target_ulong rs1, target_ulong rs2) +target_ulong HELPER(clmulr)(target_ulong rs1, target_ulong rs2) { - return do_grev(rs1, rs2, TARGET_LONG_BITS); -} + target_ulong result = 0; -target_ulong HELPER(grevw)(target_ulong rs1, target_ulong rs2) -{ - return do_grev(rs1, rs2, 32); -} - -static target_ulong do_gorc(target_ulong rs1, - target_ulong rs2, - int bits) -{ - target_ulong x = rs1; - int i, shift; - - for (i = 0, shift = 1; shift < bits; i++, shift <<= 1) { - if (rs2 & shift) { - x |= do_swap(x, adjacent_masks[i], shift); + for (int i = 0; i < TARGET_LONG_BITS; i++) { + if ((rs2 >> i) & 1) { + result ^= (rs1 >> (TARGET_LONG_BITS - i - 1)); } } - return x; -} - -target_ulong HELPER(gorc)(target_ulong rs1, target_ulong rs2) -{ - return do_gorc(rs1, rs2, TARGET_LONG_BITS); -} - -target_ulong HELPER(gorcw)(target_ulong rs1, target_ulong rs2) -{ - return do_gorc(rs1, rs2, 32); + return result; } diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c index 7c626d89cd..f812998123 100644 --- a/target/riscv/cpu.c +++ b/target/riscv/cpu.c @@ -108,18 +108,10 @@ const char *riscv_cpu_get_trap_name(target_ulong cause, bool async) } } -bool riscv_cpu_is_32bit(CPURISCVState *env) +static void set_misa(CPURISCVState *env, RISCVMXL mxl, uint32_t ext) { - if (env->misa & RV64) { - return false; - } - - return true; -} - -static void set_misa(CPURISCVState *env, target_ulong misa) -{ - env->misa_mask = env->misa = misa; + env->misa_mxl_max = env->misa_mxl = mxl; + env->misa_ext_mask = 
env->misa_ext = ext; } static void set_priv_version(CPURISCVState *env, int priv_ver) @@ -127,11 +119,6 @@ static void set_priv_version(CPURISCVState *env, int priv_ver) env->priv_ver = priv_ver; } -static void set_bext_version(CPURISCVState *env, int bext_ver) -{ - env->bext_ver = bext_ver; -} - static void set_vext_version(CPURISCVState *env, int vext_ver) { env->vext_ver = vext_ver; @@ -153,9 +140,9 @@ static void riscv_any_cpu_init(Object *obj) { CPURISCVState *env = &RISCV_CPU(obj)->env; #if defined(TARGET_RISCV32) - set_misa(env, RV32 | RVI | RVM | RVA | RVF | RVD | RVC | RVU); + set_misa(env, MXL_RV32, RVI | RVM | RVA | RVF | RVD | RVC | RVU); #elif defined(TARGET_RISCV64) - set_misa(env, RV64 | RVI | RVM | RVA | RVF | RVD | RVC | RVU); + set_misa(env, MXL_RV64, RVI | RVM | RVA | RVF | RVD | RVC | RVU); #endif set_priv_version(env, PRIV_VERSION_1_11_0); } @@ -165,20 +152,20 @@ static void rv64_base_cpu_init(Object *obj) { CPURISCVState *env = &RISCV_CPU(obj)->env; /* We set this in the realise function */ - set_misa(env, RV64); + set_misa(env, MXL_RV64, 0); } static void rv64_sifive_u_cpu_init(Object *obj) { CPURISCVState *env = &RISCV_CPU(obj)->env; - set_misa(env, RV64 | RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU); + set_misa(env, MXL_RV64, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU); set_priv_version(env, PRIV_VERSION_1_10_0); } static void rv64_sifive_e_cpu_init(Object *obj) { CPURISCVState *env = &RISCV_CPU(obj)->env; - set_misa(env, RV64 | RVI | RVM | RVA | RVC | RVU); + set_misa(env, MXL_RV64, RVI | RVM | RVA | RVC | RVU); set_priv_version(env, PRIV_VERSION_1_10_0); qdev_prop_set_bit(DEVICE(obj), "mmu", false); } @@ -187,20 +174,20 @@ static void rv32_base_cpu_init(Object *obj) { CPURISCVState *env = &RISCV_CPU(obj)->env; /* We set this in the realise function */ - set_misa(env, RV32); + set_misa(env, MXL_RV32, 0); } static void rv32_sifive_u_cpu_init(Object *obj) { CPURISCVState *env = &RISCV_CPU(obj)->env; - set_misa(env, RV32 | RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU); + set_misa(env, MXL_RV32, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU); set_priv_version(env, PRIV_VERSION_1_10_0); } static void rv32_sifive_e_cpu_init(Object *obj) { CPURISCVState *env = &RISCV_CPU(obj)->env; - set_misa(env, RV32 | RVI | RVM | RVA | RVC | RVU); + set_misa(env, MXL_RV32, RVI | RVM | RVA | RVC | RVU); set_priv_version(env, PRIV_VERSION_1_10_0); qdev_prop_set_bit(DEVICE(obj), "mmu", false); } @@ -208,7 +195,7 @@ static void rv32_sifive_e_cpu_init(Object *obj) static void rv32_ibex_cpu_init(Object *obj) { CPURISCVState *env = &RISCV_CPU(obj)->env; - set_misa(env, RV32 | RVI | RVM | RVC | RVU); + set_misa(env, MXL_RV32, RVI | RVM | RVC | RVU); set_priv_version(env, PRIV_VERSION_1_10_0); qdev_prop_set_bit(DEVICE(obj), "mmu", false); qdev_prop_set_bit(DEVICE(obj), "x-epmp", true); @@ -217,7 +204,7 @@ static void rv32_ibex_cpu_init(Object *obj) static void rv32_imafcu_nommu_cpu_init(Object *obj) { CPURISCVState *env = &RISCV_CPU(obj)->env; - set_misa(env, RV32 | RVI | RVM | RVA | RVF | RVC | RVU); + set_misa(env, MXL_RV32, RVI | RVM | RVA | RVF | RVC | RVU); set_priv_version(env, PRIV_VERSION_1_10_0); set_resetvec(env, DEFAULT_RSTVEC); qdev_prop_set_bit(DEVICE(obj), "mmu", false); @@ -255,55 +242,63 @@ static void riscv_cpu_dump_state(CPUState *cs, FILE *f, int flags) #endif qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "pc ", env->pc); #ifndef CONFIG_USER_ONLY - qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "mhartid ", env->mhartid); - qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "mstatus 
", (target_ulong)env->mstatus); - if (riscv_cpu_is_32bit(env)) { - qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "mstatush ", - (target_ulong)(env->mstatus >> 32)); + { + static const int dump_csrs[] = { + CSR_MHARTID, + CSR_MSTATUS, + CSR_MSTATUSH, + CSR_HSTATUS, + CSR_VSSTATUS, + CSR_MIP, + CSR_MIE, + CSR_MIDELEG, + CSR_HIDELEG, + CSR_MEDELEG, + CSR_HEDELEG, + CSR_MTVEC, + CSR_STVEC, + CSR_VSTVEC, + CSR_MEPC, + CSR_SEPC, + CSR_VSEPC, + CSR_MCAUSE, + CSR_SCAUSE, + CSR_VSCAUSE, + CSR_MTVAL, + CSR_STVAL, + CSR_HTVAL, + CSR_MTVAL2, + CSR_MSCRATCH, + CSR_SSCRATCH, + CSR_SATP, + CSR_MMTE, + CSR_UPMBASE, + CSR_UPMMASK, + CSR_SPMBASE, + CSR_SPMMASK, + CSR_MPMBASE, + CSR_MPMMASK, + }; + + for (int i = 0; i < ARRAY_SIZE(dump_csrs); ++i) { + int csrno = dump_csrs[i]; + target_ulong val = 0; + RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0); + + /* + * Rely on the smode, hmode, etc, predicates within csr.c + * to do the filtering of the registers that are present. + */ + if (res == RISCV_EXCP_NONE) { + qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n", + csr_ops[csrno].name, val); + } + } } - if (riscv_has_ext(env, RVH)) { - qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "hstatus ", env->hstatus); - qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "vsstatus ", - (target_ulong)env->vsstatus); - } - qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "mip ", env->mip); - qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "mie ", env->mie); - qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "mideleg ", env->mideleg); - if (riscv_has_ext(env, RVH)) { - qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "hideleg ", env->hideleg); - } - qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "medeleg ", env->medeleg); - if (riscv_has_ext(env, RVH)) { - qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "hedeleg ", env->hedeleg); - } - qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "mtvec ", env->mtvec); - qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "stvec ", env->stvec); - if (riscv_has_ext(env, RVH)) { - qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "vstvec ", env->vstvec); - } - qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "mepc ", env->mepc); - qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "sepc ", env->sepc); - if (riscv_has_ext(env, RVH)) { - qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "vsepc ", env->vsepc); - } - qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "mcause ", env->mcause); - qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "scause ", env->scause); - if (riscv_has_ext(env, RVH)) { - qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "vscause ", env->vscause); - } - qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "mtval ", env->mtval); - qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "stval ", env->stval); - if (riscv_has_ext(env, RVH)) { - qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "htval ", env->htval); - qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "mtval2 ", env->mtval2); - } - qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "mscratch", env->mscratch); - qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "sscratch", env->sscratch); - qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "satp ", env->satp); #endif for (i = 0; i < 32; i++) { - qemu_fprintf(f, " %s " TARGET_FMT_lx, + qemu_fprintf(f, " %-8s " TARGET_FMT_lx, riscv_int_regnames[i], env->gpr[i]); if ((i & 3) == 3) { qemu_fprintf(f, "\n"); @@ -311,7 +306,7 @@ static void riscv_cpu_dump_state(CPUState *cs, FILE *f, int flags) } if (flags & CPU_DUMP_FPU) { for (i = 0; i < 32; i++) { - qemu_fprintf(f, " %s %016" PRIx64, + qemu_fprintf(f, " %-8s %016" PRIx64, riscv_fpr_regnames[i], env->fpr[i]); if ((i & 3) == 3) { qemu_fprintf(f, "\n"); @@ -365,11 
+360,22 @@ static void riscv_cpu_reset(DeviceState *dev) mcc->parent_reset(dev); #ifndef CONFIG_USER_ONLY + env->misa_mxl = env->misa_mxl_max; env->priv = PRV_M; env->mstatus &= ~(MSTATUS_MIE | MSTATUS_MPRV); + if (env->misa_mxl > MXL_RV32) { + /* + * The reset status of SXL/UXL is undefined, but mstatus is WARL + * and we must ensure that the value after init is valid for read. + */ + env->mstatus = set_field(env->mstatus, MSTATUS64_SXL, env->misa_mxl); + env->mstatus = set_field(env->mstatus, MSTATUS64_UXL, env->misa_mxl); + } env->mcause = 0; env->pc = env->resetvec; env->two_stage_lookup = false; + /* mmte is supposed to have pm.current hardwired to 1 */ + env->mmte |= (PM_EXT_INITIAL | MMTE_M_PM_CURRENT); #endif cs->exception_index = RISCV_EXCP_NONE; env->load_res = -1; @@ -379,10 +385,16 @@ static void riscv_cpu_reset(DeviceState *dev) static void riscv_cpu_disas_set_info(CPUState *s, disassemble_info *info) { RISCVCPU *cpu = RISCV_CPU(s); - if (riscv_cpu_is_32bit(&cpu->env)) { + + switch (riscv_cpu_mxl(&cpu->env)) { + case MXL_RV32: info->print_insn = print_insn_riscv32; - } else { + break; + case MXL_RV64: info->print_insn = print_insn_riscv64; + break; + default: + g_assert_not_reached(); } } @@ -393,7 +405,6 @@ static void riscv_cpu_realize(DeviceState *dev, Error **errp) CPURISCVState *env = &cpu->env; RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(dev); int priv_version = 0; - target_ulong target_misa = env->misa; Error *local_err = NULL; cpu_exec_realizefn(cs, &local_err); @@ -439,8 +450,23 @@ static void riscv_cpu_realize(DeviceState *dev, Error **errp) set_resetvec(env, cpu->cfg.resetvec); - /* If only XLEN is set for misa, then set misa from properties */ - if (env->misa == RV32 || env->misa == RV64) { + /* Validate that MISA_MXL is set properly. 
*/ + switch (env->misa_mxl_max) { +#ifdef TARGET_RISCV64 + case MXL_RV64: + break; +#endif + case MXL_RV32: + break; + default: + g_assert_not_reached(); + } + assert(env->misa_mxl_max == env->misa_mxl); + + /* If only MISA_EXT is unset for misa, then set it from properties */ + if (env->misa_ext == 0) { + uint32_t ext = 0; + /* Do some ISA extension error checking */ if (cpu->cfg.ext_i && cpu->cfg.ext_e) { error_setg(errp, @@ -467,57 +493,38 @@ static void riscv_cpu_realize(DeviceState *dev, Error **errp) /* Set the ISA extensions, checks should have happened above */ if (cpu->cfg.ext_i) { - target_misa |= RVI; + ext |= RVI; } if (cpu->cfg.ext_e) { - target_misa |= RVE; + ext |= RVE; } if (cpu->cfg.ext_m) { - target_misa |= RVM; + ext |= RVM; } if (cpu->cfg.ext_a) { - target_misa |= RVA; + ext |= RVA; } if (cpu->cfg.ext_f) { - target_misa |= RVF; + ext |= RVF; } if (cpu->cfg.ext_d) { - target_misa |= RVD; + ext |= RVD; } if (cpu->cfg.ext_c) { - target_misa |= RVC; + ext |= RVC; } if (cpu->cfg.ext_s) { - target_misa |= RVS; + ext |= RVS; } if (cpu->cfg.ext_u) { - target_misa |= RVU; + ext |= RVU; } if (cpu->cfg.ext_h) { - target_misa |= RVH; - } - if (cpu->cfg.ext_b) { - int bext_version = BEXT_VERSION_0_93_0; - target_misa |= RVB; - - if (cpu->cfg.bext_spec) { - if (!g_strcmp0(cpu->cfg.bext_spec, "v0.93")) { - bext_version = BEXT_VERSION_0_93_0; - } else { - error_setg(errp, - "Unsupported bitmanip spec version '%s'", - cpu->cfg.bext_spec); - return; - } - } else { - qemu_log("bitmanip version is not specified, " - "use the default value v0.93\n"); - } - set_bext_version(env, bext_version); + ext |= RVH; } if (cpu->cfg.ext_v) { int vext_version = VEXT_VERSION_0_07_1; - target_misa |= RVV; + ext |= RVV; if (!is_power_of_2(cpu->cfg.vlen)) { error_setg(errp, "Vector extension VLEN must be power of 2"); @@ -555,8 +562,11 @@ static void riscv_cpu_realize(DeviceState *dev, Error **errp) } set_vext_version(env, vext_version); } + if (cpu->cfg.ext_j) { + ext |= RVJ; + } - set_misa(env, target_misa); + set_misa(env, env->misa_mxl, ext); } riscv_cpu_register_gdb_regs_for_features(cs); @@ -605,6 +615,7 @@ static void riscv_cpu_init(Object *obj) } static Property riscv_cpu_properties[] = { + /* Defaults for standard extensions */ DEFINE_PROP_BOOL("i", RISCVCPU, cfg.ext_i, true), DEFINE_PROP_BOOL("e", RISCVCPU, cfg.ext_e, false), DEFINE_PROP_BOOL("g", RISCVCPU, cfg.ext_g, true), @@ -615,20 +626,25 @@ static Property riscv_cpu_properties[] = { DEFINE_PROP_BOOL("c", RISCVCPU, cfg.ext_c, true), DEFINE_PROP_BOOL("s", RISCVCPU, cfg.ext_s, true), DEFINE_PROP_BOOL("u", RISCVCPU, cfg.ext_u, true), - /* This is experimental so mark with 'x-' */ - DEFINE_PROP_BOOL("x-b", RISCVCPU, cfg.ext_b, false), - DEFINE_PROP_BOOL("x-h", RISCVCPU, cfg.ext_h, false), - DEFINE_PROP_BOOL("x-v", RISCVCPU, cfg.ext_v, false), DEFINE_PROP_BOOL("Counters", RISCVCPU, cfg.ext_counters, true), DEFINE_PROP_BOOL("Zifencei", RISCVCPU, cfg.ext_ifencei, true), DEFINE_PROP_BOOL("Zicsr", RISCVCPU, cfg.ext_icsr, true), + DEFINE_PROP_BOOL("mmu", RISCVCPU, cfg.mmu, true), + DEFINE_PROP_BOOL("pmp", RISCVCPU, cfg.pmp, true), + DEFINE_PROP_STRING("priv_spec", RISCVCPU, cfg.priv_spec), - DEFINE_PROP_STRING("bext_spec", RISCVCPU, cfg.bext_spec), + + /* These are experimental so mark with 'x-' */ + DEFINE_PROP_BOOL("x-zba", RISCVCPU, cfg.ext_zba, false), + DEFINE_PROP_BOOL("x-zbb", RISCVCPU, cfg.ext_zbb, false), + DEFINE_PROP_BOOL("x-zbc", RISCVCPU, cfg.ext_zbc, false), + DEFINE_PROP_BOOL("x-zbs", RISCVCPU, cfg.ext_zbs, false), + 
DEFINE_PROP_BOOL("x-h", RISCVCPU, cfg.ext_h, false), + DEFINE_PROP_BOOL("x-j", RISCVCPU, cfg.ext_j, false), + DEFINE_PROP_BOOL("x-v", RISCVCPU, cfg.ext_v, false), DEFINE_PROP_STRING("vext_spec", RISCVCPU, cfg.vext_spec), DEFINE_PROP_UINT16("vlen", RISCVCPU, cfg.vlen, 128), DEFINE_PROP_UINT16("elen", RISCVCPU, cfg.elen, 64), - DEFINE_PROP_BOOL("mmu", RISCVCPU, cfg.mmu, true), - DEFINE_PROP_BOOL("pmp", RISCVCPU, cfg.pmp, true), /* ePMP 0.9.3 */ DEFINE_PROP_BOOL("x-epmp", RISCVCPU, cfg.epmp, false), @@ -641,10 +657,13 @@ static gchar *riscv_gdb_arch_name(CPUState *cs) RISCVCPU *cpu = RISCV_CPU(cs); CPURISCVState *env = &cpu->env; - if (riscv_cpu_is_32bit(env)) { + switch (riscv_cpu_mxl(env)) { + case MXL_RV32: return g_strdup("riscv:rv32"); - } else { + case MXL_RV64: return g_strdup("riscv:rv64"); + default: + g_assert_not_reached(); } } @@ -675,9 +694,9 @@ static const struct SysemuCPUOps riscv_sysemu_ops = { static const struct TCGCPUOps riscv_tcg_ops = { .initialize = riscv_translate_init, .synchronize_from_tb = riscv_cpu_synchronize_from_tb, - .tlb_fill = riscv_cpu_tlb_fill, #ifndef CONFIG_USER_ONLY + .tlb_fill = riscv_cpu_tlb_fill, .cpu_exec_interrupt = riscv_cpu_exec_interrupt, .do_interrupt = riscv_cpu_do_interrupt, .do_transaction_failed = riscv_cpu_do_transaction_failed, @@ -727,7 +746,7 @@ char *riscv_isa_string(RISCVCPU *cpu) char *isa_str = g_new(char, maxlen); char *p = isa_str + snprintf(isa_str, maxlen, "rv%d", TARGET_LONG_BITS); for (i = 0; i < sizeof(riscv_exts); i++) { - if (cpu->env.misa & RV(riscv_exts[i])) { + if (cpu->env.misa_ext & RV(riscv_exts[i])) { *p++ = qemu_tolower(riscv_exts[i]); } } diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h index 5896aca346..0760c0af93 100644 --- a/target/riscv/cpu.h +++ b/target/riscv/cpu.h @@ -25,6 +25,7 @@ #include "exec/cpu-defs.h" #include "fpu/softfloat-types.h" #include "qom/object.h" +#include "cpu_bits.h" #define TCG_GUEST_DEFAULT_MO 0 @@ -51,9 +52,6 @@ # define TYPE_RISCV_CPU_BASE TYPE_RISCV_CPU_BASE64 #endif -#define RV32 ((target_ulong)1 << (TARGET_LONG_BITS - 2)) -#define RV64 ((target_ulong)2 << (TARGET_LONG_BITS - 2)) - #define RV(x) ((target_ulong)1 << (x - 'A')) #define RVI RV('I') @@ -67,7 +65,7 @@ #define RVS RV('S') #define RVU RV('U') #define RVH RV('H') -#define RVB RV('B') +#define RVJ RV('J') /* S extension denotes that Supervisor mode exists, however it is possible to have a core that support S mode but does not have an MMU and there @@ -83,7 +81,6 @@ enum { #define PRIV_VERSION_1_10_0 0x00011000 #define PRIV_VERSION_1_11_0 0x00011100 -#define BEXT_VERSION_0_93_0 0x00009300 #define VEXT_VERSION_0_07_1 0x00000701 enum { @@ -135,8 +132,12 @@ struct CPURISCVState { target_ulong priv_ver; target_ulong bext_ver; target_ulong vext_ver; - target_ulong misa; - target_ulong misa_mask; + + /* RISCVMXL, but uint32_t for vmstate migration */ + uint32_t misa_mxl; /* current mxl */ + uint32_t misa_mxl_max; /* max mxl for this cpu */ + uint32_t misa_ext; /* current extensions */ + uint32_t misa_ext_mask; /* max ext for this cpu */ uint32_t features; @@ -237,6 +238,17 @@ struct CPURISCVState { /* True if in debugger mode. 
*/ bool debugger; + + /* + * CSRs for PointerMasking extension + */ + target_ulong mmte; + target_ulong mpmmask; + target_ulong mpmbase; + target_ulong spmmask; + target_ulong spmbase; + target_ulong upmmask; + target_ulong upmbase; #endif float_status fp_status; @@ -288,11 +300,15 @@ struct RISCVCPU { bool ext_f; bool ext_d; bool ext_c; - bool ext_b; bool ext_s; bool ext_u; bool ext_h; + bool ext_j; bool ext_v; + bool ext_zba; + bool ext_zbb; + bool ext_zbc; + bool ext_zbs; bool ext_counters; bool ext_ifencei; bool ext_icsr; @@ -312,7 +328,7 @@ struct RISCVCPU { static inline int riscv_has_ext(CPURISCVState *env, target_ulong ext) { - return (env->misa & ext) != 0; + return (env->misa_ext & ext) != 0; } static inline bool riscv_feature(CPURISCVState *env, int feature) @@ -321,7 +337,6 @@ static inline bool riscv_feature(CPURISCVState *env, int feature) } #include "cpu_user.h" -#include "cpu_bits.h" extern const char * const riscv_int_regnames[]; extern const char * const riscv_fpr_regnames[]; @@ -337,8 +352,6 @@ int riscv_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg); bool riscv_cpu_fp_enabled(CPURISCVState *env); bool riscv_cpu_virt_enabled(CPURISCVState *env); void riscv_cpu_set_virt_enabled(CPURISCVState *env, bool enable); -bool riscv_cpu_force_hs_excep_enabled(CPURISCVState *env); -void riscv_cpu_set_force_hs_excep(CPURISCVState *env, bool enable); bool riscv_cpu_two_stage_lookup(int mmu_idx); int riscv_cpu_mmu_index(CPURISCVState *env, bool ifetch); hwaddr riscv_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr); @@ -377,7 +390,6 @@ void QEMU_NORETURN riscv_raise_exception(CPURISCVState *env, target_ulong riscv_cpu_get_fflags(CPURISCVState *env); void riscv_cpu_set_fflags(CPURISCVState *env, target_ulong); -#define TB_FLAGS_MMU_MASK 7 #define TB_FLAGS_PRIV_MMU_MASK 3 #define TB_FLAGS_PRIV_HYP_ACCESS_MASK (1 << 2) #define TB_FLAGS_MSTATUS_FS MSTATUS_FS @@ -386,14 +398,27 @@ typedef CPURISCVState CPUArchState; typedef RISCVCPU ArchCPU; #include "exec/cpu-all.h" -FIELD(TB_FLAGS, VL_EQ_VLMAX, 2, 1) -FIELD(TB_FLAGS, LMUL, 3, 2) -FIELD(TB_FLAGS, SEW, 5, 3) -FIELD(TB_FLAGS, VILL, 8, 1) +FIELD(TB_FLAGS, MEM_IDX, 0, 3) +FIELD(TB_FLAGS, VL_EQ_VLMAX, 3, 1) +FIELD(TB_FLAGS, LMUL, 4, 2) +FIELD(TB_FLAGS, SEW, 6, 3) +FIELD(TB_FLAGS, VILL, 9, 1) /* Is a Hypervisor instruction load/store allowed? */ -FIELD(TB_FLAGS, HLSX, 9, 1) +FIELD(TB_FLAGS, HLSX, 10, 1) +FIELD(TB_FLAGS, MSTATUS_HS_FS, 11, 2) +/* The combination of MXL/SXL/UXL that applies to the current cpu mode. 
*/ +FIELD(TB_FLAGS, XL, 13, 2) +/* If PointerMasking should be applied */ +FIELD(TB_FLAGS, PM_ENABLED, 15, 1) -bool riscv_cpu_is_32bit(CPURISCVState *env); +#ifdef TARGET_RISCV32 +#define riscv_cpu_mxl(env) ((void)(env), MXL_RV32) +#else +static inline RISCVMXL riscv_cpu_mxl(CPURISCVState *env) +{ + return env->misa_mxl; +} +#endif /* * A simplification for VLMAX @@ -411,48 +436,8 @@ static inline uint32_t vext_get_vlmax(RISCVCPU *cpu, target_ulong vtype) return cpu->cfg.vlen >> (sew + 3 - lmul); } -static inline void cpu_get_tb_cpu_state(CPURISCVState *env, target_ulong *pc, - target_ulong *cs_base, uint32_t *pflags) -{ - uint32_t flags = 0; - - *pc = env->pc; - *cs_base = 0; - - if (riscv_has_ext(env, RVV)) { - uint32_t vlmax = vext_get_vlmax(env_archcpu(env), env->vtype); - bool vl_eq_vlmax = (env->vstart == 0) && (vlmax == env->vl); - flags = FIELD_DP32(flags, TB_FLAGS, VILL, - FIELD_EX64(env->vtype, VTYPE, VILL)); - flags = FIELD_DP32(flags, TB_FLAGS, SEW, - FIELD_EX64(env->vtype, VTYPE, VSEW)); - flags = FIELD_DP32(flags, TB_FLAGS, LMUL, - FIELD_EX64(env->vtype, VTYPE, VLMUL)); - flags = FIELD_DP32(flags, TB_FLAGS, VL_EQ_VLMAX, vl_eq_vlmax); - } else { - flags = FIELD_DP32(flags, TB_FLAGS, VILL, 1); - } - -#ifdef CONFIG_USER_ONLY - flags |= TB_FLAGS_MSTATUS_FS; -#else - flags |= cpu_mmu_index(env, 0); - if (riscv_cpu_fp_enabled(env)) { - flags |= env->mstatus & MSTATUS_FS; - } - - if (riscv_has_ext(env, RVH)) { - if (env->priv == PRV_M || - (env->priv == PRV_S && !riscv_cpu_virt_enabled(env)) || - (env->priv == PRV_U && !riscv_cpu_virt_enabled(env) && - get_field(env->hstatus, HSTATUS_HU))) { - flags = FIELD_DP32(flags, TB_FLAGS, HLSX, 1); - } - } -#endif - - *pflags = flags; -} +void cpu_get_tb_cpu_state(CPURISCVState *env, target_ulong *pc, + target_ulong *cs_base, uint32_t *pflags); RISCVException riscv_csrrw(CPURISCVState *env, int csrno, target_ulong *ret_value, diff --git a/target/riscv/cpu_bits.h b/target/riscv/cpu_bits.h index 999187a9ee..9913fa9f77 100644 --- a/target/riscv/cpu_bits.h +++ b/target/riscv/cpu_bits.h @@ -334,6 +334,38 @@ #define CSR_MHPMCOUNTER30H 0xb9e #define CSR_MHPMCOUNTER31H 0xb9f +/* + * User PointerMasking registers + * NB: actual CSR numbers might be changed in future + */ +#define CSR_UMTE 0x4c0 +#define CSR_UPMMASK 0x4c1 +#define CSR_UPMBASE 0x4c2 + +/* + * Machine PointerMasking registers + * NB: actual CSR numbers might be changed in future + */ +#define CSR_MMTE 0x3c0 +#define CSR_MPMMASK 0x3c1 +#define CSR_MPMBASE 0x3c2 + +/* + * Supervisor PointerMaster registers + * NB: actual CSR numbers might be changed in future + */ +#define CSR_SMTE 0x1c0 +#define CSR_SPMMASK 0x1c1 +#define CSR_SPMBASE 0x1c2 + +/* + * Hypervisor PointerMaster registers + * NB: actual CSR numbers might be changed in future + */ +#define CSR_VSMTE 0x2c0 +#define CSR_VSPMMASK 0x2c1 +#define CSR_VSPMBASE 0x2c2 + /* mstatus CSR bits */ #define MSTATUS_UIE 0x00000001 #define MSTATUS_SIE 0x00000002 @@ -364,9 +396,11 @@ #define MISA32_MXL 0xC0000000 #define MISA64_MXL 0xC000000000000000ULL -#define MXL_RV32 1 -#define MXL_RV64 2 -#define MXL_RV128 3 +typedef enum { + MXL_RV32 = 1, + MXL_RV64 = 2, + MXL_RV128 = 3, +} RISCVMXL; /* sstatus CSR bits */ #define SSTATUS_UIE 0x00000001 @@ -410,12 +444,6 @@ /* Virtulisation Register Fields */ #define VIRT_ONOFF 1 -/* This is used to save state for when we take an exception. If this is set - * that means that we want to force a HS level exception (no matter what the - * delegation is set to). 
This will occur for things such as a second level - * page table fault. - */ -#define FORCE_HS_EXCEP 2 /* RV32 satp CSR field masks */ #define SATP32_MODE 0x80000000 @@ -427,14 +455,6 @@ #define SATP64_ASID 0x0FFFF00000000000ULL #define SATP64_PPN 0x00000FFFFFFFFFFFULL -/* VM modes (mstatus.vm) privileged ISA 1.9.1 */ -#define VM_1_09_MBARE 0 -#define VM_1_09_MBB 1 -#define VM_1_09_MBBID 2 -#define VM_1_09_SV32 8 -#define VM_1_09_SV39 9 -#define VM_1_09_SV48 10 - /* VM modes (satp.mode) privileged ISA 1.10 */ #define VM_1_10_MBARE 0 #define VM_1_10_SV32 1 @@ -531,4 +551,68 @@ typedef enum RISCVException { #define MIE_UTIE (1 << IRQ_U_TIMER) #define MIE_SSIE (1 << IRQ_S_SOFT) #define MIE_USIE (1 << IRQ_U_SOFT) + +/* General PointerMasking CSR bits*/ +#define PM_ENABLE 0x00000001ULL +#define PM_CURRENT 0x00000002ULL +#define PM_INSN 0x00000004ULL +#define PM_XS_MASK 0x00000003ULL + +/* PointerMasking XS bits values */ +#define PM_EXT_DISABLE 0x00000000ULL +#define PM_EXT_INITIAL 0x00000001ULL +#define PM_EXT_CLEAN 0x00000002ULL +#define PM_EXT_DIRTY 0x00000003ULL + +/* Offsets for every pair of control bits per each priv level */ +#define XS_OFFSET 0ULL +#define U_OFFSET 2ULL +#define S_OFFSET 5ULL +#define M_OFFSET 8ULL + +#define PM_XS_BITS (PM_XS_MASK << XS_OFFSET) +#define U_PM_ENABLE (PM_ENABLE << U_OFFSET) +#define U_PM_CURRENT (PM_CURRENT << U_OFFSET) +#define U_PM_INSN (PM_INSN << U_OFFSET) +#define S_PM_ENABLE (PM_ENABLE << S_OFFSET) +#define S_PM_CURRENT (PM_CURRENT << S_OFFSET) +#define S_PM_INSN (PM_INSN << S_OFFSET) +#define M_PM_ENABLE (PM_ENABLE << M_OFFSET) +#define M_PM_CURRENT (PM_CURRENT << M_OFFSET) +#define M_PM_INSN (PM_INSN << M_OFFSET) + +/* mmte CSR bits */ +#define MMTE_PM_XS_BITS PM_XS_BITS +#define MMTE_U_PM_ENABLE U_PM_ENABLE +#define MMTE_U_PM_CURRENT U_PM_CURRENT +#define MMTE_U_PM_INSN U_PM_INSN +#define MMTE_S_PM_ENABLE S_PM_ENABLE +#define MMTE_S_PM_CURRENT S_PM_CURRENT +#define MMTE_S_PM_INSN S_PM_INSN +#define MMTE_M_PM_ENABLE M_PM_ENABLE +#define MMTE_M_PM_CURRENT M_PM_CURRENT +#define MMTE_M_PM_INSN M_PM_INSN +#define MMTE_MASK (MMTE_U_PM_ENABLE | MMTE_U_PM_CURRENT | MMTE_U_PM_INSN | \ + MMTE_S_PM_ENABLE | MMTE_S_PM_CURRENT | MMTE_S_PM_INSN | \ + MMTE_M_PM_ENABLE | MMTE_M_PM_CURRENT | MMTE_M_PM_INSN | \ + MMTE_PM_XS_BITS) + +/* (v)smte CSR bits */ +#define SMTE_PM_XS_BITS PM_XS_BITS +#define SMTE_U_PM_ENABLE U_PM_ENABLE +#define SMTE_U_PM_CURRENT U_PM_CURRENT +#define SMTE_U_PM_INSN U_PM_INSN +#define SMTE_S_PM_ENABLE S_PM_ENABLE +#define SMTE_S_PM_CURRENT S_PM_CURRENT +#define SMTE_S_PM_INSN S_PM_INSN +#define SMTE_MASK (SMTE_U_PM_ENABLE | SMTE_U_PM_CURRENT | SMTE_U_PM_INSN | \ + SMTE_S_PM_ENABLE | SMTE_S_PM_CURRENT | SMTE_S_PM_INSN | \ + SMTE_PM_XS_BITS) + +/* umte CSR bits */ +#define UMTE_U_PM_ENABLE U_PM_ENABLE +#define UMTE_U_PM_CURRENT U_PM_CURRENT +#define UMTE_U_PM_INSN U_PM_INSN +#define UMTE_MASK (UMTE_U_PM_ENABLE | MMTE_U_PM_CURRENT | UMTE_U_PM_INSN) + #endif diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c index d41d5cd27c..9eeed38c7e 100644 --- a/target/riscv/cpu_helper.c +++ b/target/riscv/cpu_helper.c @@ -35,39 +35,124 @@ int riscv_cpu_mmu_index(CPURISCVState *env, bool ifetch) #endif } +static RISCVMXL cpu_get_xl(CPURISCVState *env) +{ +#if defined(TARGET_RISCV32) + return MXL_RV32; +#elif defined(CONFIG_USER_ONLY) + return MXL_RV64; +#else + RISCVMXL xl = riscv_cpu_mxl(env); + + /* + * When emulating a 32-bit-only cpu, use RV32. 
+ * When emulating a 64-bit cpu, and MXL has been reduced to RV32, + * MSTATUSH doesn't have UXL/SXL, therefore XLEN cannot be widened + * back to RV64 for lower privs. + */ + if (xl != MXL_RV32) { + switch (env->priv) { + case PRV_M: + break; + case PRV_U: + xl = get_field(env->mstatus, MSTATUS64_UXL); + break; + default: /* PRV_S | PRV_H */ + xl = get_field(env->mstatus, MSTATUS64_SXL); + break; + } + } + return xl; +#endif +} + +void cpu_get_tb_cpu_state(CPURISCVState *env, target_ulong *pc, + target_ulong *cs_base, uint32_t *pflags) +{ + uint32_t flags = 0; + + *pc = env->pc; + *cs_base = 0; + + if (riscv_has_ext(env, RVV)) { + uint32_t vlmax = vext_get_vlmax(env_archcpu(env), env->vtype); + bool vl_eq_vlmax = (env->vstart == 0) && (vlmax == env->vl); + flags = FIELD_DP32(flags, TB_FLAGS, VILL, + FIELD_EX64(env->vtype, VTYPE, VILL)); + flags = FIELD_DP32(flags, TB_FLAGS, SEW, + FIELD_EX64(env->vtype, VTYPE, VSEW)); + flags = FIELD_DP32(flags, TB_FLAGS, LMUL, + FIELD_EX64(env->vtype, VTYPE, VLMUL)); + flags = FIELD_DP32(flags, TB_FLAGS, VL_EQ_VLMAX, vl_eq_vlmax); + } else { + flags = FIELD_DP32(flags, TB_FLAGS, VILL, 1); + } + +#ifdef CONFIG_USER_ONLY + flags |= TB_FLAGS_MSTATUS_FS; +#else + flags |= cpu_mmu_index(env, 0); + if (riscv_cpu_fp_enabled(env)) { + flags |= env->mstatus & MSTATUS_FS; + } + + if (riscv_has_ext(env, RVH)) { + if (env->priv == PRV_M || + (env->priv == PRV_S && !riscv_cpu_virt_enabled(env)) || + (env->priv == PRV_U && !riscv_cpu_virt_enabled(env) && + get_field(env->hstatus, HSTATUS_HU))) { + flags = FIELD_DP32(flags, TB_FLAGS, HLSX, 1); + } + + flags = FIELD_DP32(flags, TB_FLAGS, MSTATUS_HS_FS, + get_field(env->mstatus_hs, MSTATUS_FS)); + } + if (riscv_has_ext(env, RVJ)) { + int priv = flags & TB_FLAGS_PRIV_MMU_MASK; + bool pm_enabled = false; + switch (priv) { + case PRV_U: + pm_enabled = env->mmte & U_PM_ENABLE; + break; + case PRV_S: + pm_enabled = env->mmte & S_PM_ENABLE; + break; + case PRV_M: + pm_enabled = env->mmte & M_PM_ENABLE; + break; + default: + g_assert_not_reached(); + } + flags = FIELD_DP32(flags, TB_FLAGS, PM_ENABLED, pm_enabled); + } +#endif + + flags = FIELD_DP32(flags, TB_FLAGS, XL, cpu_get_xl(env)); + + *pflags = flags; +} + #ifndef CONFIG_USER_ONLY static int riscv_cpu_local_irq_pending(CPURISCVState *env) { - target_ulong irqs; + target_ulong virt_enabled = riscv_cpu_virt_enabled(env); target_ulong mstatus_mie = get_field(env->mstatus, MSTATUS_MIE); target_ulong mstatus_sie = get_field(env->mstatus, MSTATUS_SIE); - target_ulong hs_mstatus_sie = get_field(env->mstatus_hs, MSTATUS_SIE); - target_ulong pending = env->mip & env->mie & - ~(MIP_VSSIP | MIP_VSTIP | MIP_VSEIP); - target_ulong vspending = (env->mip & env->mie & - (MIP_VSSIP | MIP_VSTIP | MIP_VSEIP)); + target_ulong pending = env->mip & env->mie; target_ulong mie = env->priv < PRV_M || (env->priv == PRV_M && mstatus_mie); target_ulong sie = env->priv < PRV_S || (env->priv == PRV_S && mstatus_sie); - target_ulong hs_sie = env->priv < PRV_S || - (env->priv == PRV_S && hs_mstatus_sie); + target_ulong hsie = virt_enabled || sie; + target_ulong vsie = virt_enabled && sie; - if (riscv_cpu_virt_enabled(env)) { - target_ulong pending_hs_irq = pending & -hs_sie; - - if (pending_hs_irq) { - riscv_cpu_set_force_hs_excep(env, FORCE_HS_EXCEP); - return ctz64(pending_hs_irq); - } - - pending = vspending; - } - - irqs = (pending & ~env->mideleg & -mie) | (pending & env->mideleg & -sie); + target_ulong irqs = + (pending & ~env->mideleg & -mie) | + (pending & env->mideleg & ~env->hideleg & -hsie) | 
+ (pending & env->mideleg & env->hideleg & -vsie); if (irqs) { return ctz64(irqs); /* since non-zero */ @@ -106,10 +191,9 @@ bool riscv_cpu_fp_enabled(CPURISCVState *env) void riscv_cpu_swap_hypervisor_regs(CPURISCVState *env) { - uint64_t sd = riscv_cpu_is_32bit(env) ? MSTATUS32_SD : MSTATUS64_SD; uint64_t mstatus_mask = MSTATUS_MXR | MSTATUS_SUM | MSTATUS_FS | MSTATUS_SPP | MSTATUS_SPIE | MSTATUS_SIE | - MSTATUS64_UXL | sd; + MSTATUS64_UXL; bool current_virt = riscv_cpu_virt_enabled(env); g_assert(riscv_has_ext(env, RVH)); @@ -186,24 +270,6 @@ void riscv_cpu_set_virt_enabled(CPURISCVState *env, bool enable) env->virt = set_field(env->virt, VIRT_ONOFF, enable); } -bool riscv_cpu_force_hs_excep_enabled(CPURISCVState *env) -{ - if (!riscv_has_ext(env, RVH)) { - return false; - } - - return get_field(env->virt, FORCE_HS_EXCEP); -} - -void riscv_cpu_set_force_hs_excep(CPURISCVState *env, bool enable) -{ - if (!riscv_has_ext(env, RVH)) { - return; - } - - env->virt = set_field(env->virt, FORCE_HS_EXCEP, enable); -} - bool riscv_cpu_two_stage_lookup(int mmu_idx) { return mmu_idx & TB_FLAGS_PRIV_HYP_ACCESS_MASK; @@ -401,7 +467,7 @@ static int get_physical_address(CPURISCVState *env, hwaddr *physical, if (first_stage == true) { if (use_background) { - if (riscv_cpu_is_32bit(env)) { + if (riscv_cpu_mxl(env) == MXL_RV32) { base = (hwaddr)get_field(env->vsatp, SATP32_PPN) << PGSHIFT; vm = get_field(env->vsatp, SATP32_MODE); } else { @@ -409,7 +475,7 @@ static int get_physical_address(CPURISCVState *env, hwaddr *physical, vm = get_field(env->vsatp, SATP64_MODE); } } else { - if (riscv_cpu_is_32bit(env)) { + if (riscv_cpu_mxl(env) == MXL_RV32) { base = (hwaddr)get_field(env->satp, SATP32_PPN) << PGSHIFT; vm = get_field(env->satp, SATP32_MODE); } else { @@ -419,7 +485,7 @@ static int get_physical_address(CPURISCVState *env, hwaddr *physical, } widened = 0; } else { - if (riscv_cpu_is_32bit(env)) { + if (riscv_cpu_mxl(env) == MXL_RV32) { base = (hwaddr)get_field(env->hgatp, SATP32_PPN) << PGSHIFT; vm = get_field(env->hgatp, SATP32_MODE); } else { @@ -512,7 +578,7 @@ restart: } target_ulong pte; - if (riscv_cpu_is_32bit(env)) { + if (riscv_cpu_mxl(env) == MXL_RV32) { pte = address_space_ldl(cs->as, pte_addr, attrs, &res); } else { pte = address_space_ldq(cs->as, pte_addr, attrs, &res); @@ -632,7 +698,7 @@ static void raise_mmu_exception(CPURISCVState *env, target_ulong address, int page_fault_exceptions, vm; uint64_t stap_mode; - if (riscv_cpu_is_32bit(env)) { + if (riscv_cpu_mxl(env) == MXL_RV32) { stap_mode = SATP32_MODE; } else { stap_mode = SATP64_MODE; @@ -748,7 +814,6 @@ void riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr, riscv_cpu_two_stage_lookup(mmu_idx); riscv_raise_exception(env, cs->exception_index, retaddr); } -#endif /* !CONFIG_USER_ONLY */ bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size, MMUAccessType access_type, int mmu_idx, @@ -756,7 +821,6 @@ bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size, { RISCVCPU *cpu = RISCV_CPU(cs); CPURISCVState *env = &cpu->env; -#ifndef CONFIG_USER_ONLY vaddr im_address; hwaddr pa = 0; int prot, prot2, prot_pmp; @@ -888,25 +952,8 @@ bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size, } return true; - -#else - switch (access_type) { - case MMU_INST_FETCH: - cs->exception_index = RISCV_EXCP_INST_PAGE_FAULT; - break; - case MMU_DATA_LOAD: - cs->exception_index = RISCV_EXCP_LOAD_PAGE_FAULT; - break; - case MMU_DATA_STORE: - cs->exception_index = RISCV_EXCP_STORE_PAGE_FAULT; - break; - default: - 
g_assert_not_reached(); - } - env->badaddr = address; - cpu_loop_exit_restore(cs, retaddr); -#endif } +#endif /* !CONFIG_USER_ONLY */ /* * Handle Traps @@ -920,7 +967,6 @@ void riscv_cpu_do_interrupt(CPUState *cs) RISCVCPU *cpu = RISCV_CPU(cs); CPURISCVState *env = &cpu->env; - bool force_hs_execp = riscv_cpu_force_hs_excep_enabled(env); uint64_t s; /* cs->exception is 32-bits wide unlike mcause which is XLEN-bits wide @@ -949,8 +995,6 @@ void riscv_cpu_do_interrupt(CPUState *cs) case RISCV_EXCP_INST_GUEST_PAGE_FAULT: case RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT: case RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT: - force_hs_execp = true; - /* fallthrough */ case RISCV_EXCP_INST_ADDR_MIS: case RISCV_EXCP_INST_ACCESS_FAULT: case RISCV_EXCP_LOAD_ADDR_MIS: @@ -1009,8 +1053,7 @@ void riscv_cpu_do_interrupt(CPUState *cs) env->hstatus = set_field(env->hstatus, HSTATUS_GVA, 0); } - if (riscv_cpu_virt_enabled(env) && ((hdeleg >> cause) & 1) && - !force_hs_execp) { + if (riscv_cpu_virt_enabled(env) && ((hdeleg >> cause) & 1)) { /* Trap to VS mode */ /* * See if we need to adjust cause. Yes if its VS mode interrupt @@ -1032,7 +1075,6 @@ void riscv_cpu_do_interrupt(CPUState *cs) htval = env->guest_phys_fault_addr; riscv_cpu_set_virt_enabled(env, 0); - riscv_cpu_set_force_hs_excep(env, 0); } else { /* Trap into HS mode */ env->hstatus = set_field(env->hstatus, HSTATUS_SPV, false); @@ -1068,7 +1110,6 @@ void riscv_cpu_do_interrupt(CPUState *cs) /* Trapping to M mode, virt is disabled */ riscv_cpu_set_virt_enabled(env, 0); - riscv_cpu_set_force_hs_excep(env, 0); } s = env->mstatus; diff --git a/target/riscv/csr.c b/target/riscv/csr.c index 23fbbd3216..9f41954894 100644 --- a/target/riscv/csr.c +++ b/target/riscv/csr.c @@ -39,7 +39,7 @@ static RISCVException fs(CPURISCVState *env, int csrno) { #if !defined(CONFIG_USER_ONLY) /* loose check condition for fcsr in vector extension */ - if ((csrno == CSR_FCSR) && (env->misa & RVV)) { + if ((csrno == CSR_FCSR) && (env->misa_ext & RVV)) { return RISCV_EXCP_NONE; } if (!env->debugger && !riscv_cpu_fp_enabled(env)) { @@ -51,7 +51,7 @@ static RISCVException fs(CPURISCVState *env, int csrno) static RISCVException vs(CPURISCVState *env, int csrno) { - if (env->misa & RVV) { + if (env->misa_ext & RVV) { return RISCV_EXCP_NONE; } return RISCV_EXCP_ILLEGAL_INST; @@ -95,7 +95,7 @@ static RISCVException ctr(CPURISCVState *env, int csrno) } break; } - if (riscv_cpu_is_32bit(env)) { + if (riscv_cpu_mxl(env) == MXL_RV32) { switch (csrno) { case CSR_CYCLEH: if (!get_field(env->hcounteren, COUNTEREN_CY) && @@ -130,7 +130,7 @@ static RISCVException ctr(CPURISCVState *env, int csrno) static RISCVException ctr32(CPURISCVState *env, int csrno) { - if (!riscv_cpu_is_32bit(env)) { + if (riscv_cpu_mxl(env) != MXL_RV32) { return RISCV_EXCP_ILLEGAL_INST; } @@ -145,7 +145,7 @@ static RISCVException any(CPURISCVState *env, int csrno) static RISCVException any32(CPURISCVState *env, int csrno) { - if (!riscv_cpu_is_32bit(env)) { + if (riscv_cpu_mxl(env) != MXL_RV32) { return RISCV_EXCP_ILLEGAL_INST; } @@ -180,7 +180,7 @@ static RISCVException hmode(CPURISCVState *env, int csrno) static RISCVException hmode32(CPURISCVState *env, int csrno) { - if (!riscv_cpu_is_32bit(env)) { + if (riscv_cpu_mxl(env) != MXL_RV32) { if (riscv_cpu_virt_enabled(env)) { return RISCV_EXCP_ILLEGAL_INST; } else { @@ -192,6 +192,16 @@ static RISCVException hmode32(CPURISCVState *env, int csrno) } +/* Checks if PointerMasking registers could be accessed */ +static RISCVException pointer_masking(CPURISCVState *env, int csrno) 
+{ + /* Check if j-ext is present */ + if (riscv_has_ext(env, RVJ)) { + return RISCV_EXCP_NONE; + } + return RISCV_EXCP_ILLEGAL_INST; +} + static RISCVException pmp(CPURISCVState *env, int csrno) { if (riscv_feature(env, RISCV_FEATURE_PMP)) { @@ -477,16 +487,34 @@ static RISCVException read_mhartid(CPURISCVState *env, int csrno, } /* Machine Trap Setup */ + +/* We do not store SD explicitly, only compute it on demand. */ +static uint64_t add_status_sd(RISCVMXL xl, uint64_t status) +{ + if ((status & MSTATUS_FS) == MSTATUS_FS || + (status & MSTATUS_XS) == MSTATUS_XS) { + switch (xl) { + case MXL_RV32: + return status | MSTATUS32_SD; + case MXL_RV64: + return status | MSTATUS64_SD; + default: + g_assert_not_reached(); + } + } + return status; +} + static RISCVException read_mstatus(CPURISCVState *env, int csrno, target_ulong *val) { - *val = env->mstatus; + *val = add_status_sd(riscv_cpu_mxl(env), env->mstatus); return RISCV_EXCP_NONE; } static int validate_vm(CPURISCVState *env, target_ulong vm) { - if (riscv_cpu_is_32bit(env)) { + if (riscv_cpu_mxl(env) == MXL_RV32) { return valid_vm_1_10_32[vm & 0xf]; } else { return valid_vm_1_10_64[vm & 0xf]; @@ -498,7 +526,6 @@ static RISCVException write_mstatus(CPURISCVState *env, int csrno, { uint64_t mstatus = env->mstatus; uint64_t mask = 0; - int dirty; /* flush tlb on mstatus fields that affect VM */ if ((val ^ mstatus) & (MSTATUS_MXR | MSTATUS_MPP | MSTATUS_MPV | @@ -510,7 +537,7 @@ static RISCVException write_mstatus(CPURISCVState *env, int csrno, MSTATUS_MPP | MSTATUS_MXR | MSTATUS_TVM | MSTATUS_TSR | MSTATUS_TW; - if (!riscv_cpu_is_32bit(env)) { + if (riscv_cpu_mxl(env) != MXL_RV32) { /* * RV32: MPV and GVA are not in mstatus. The current plan is to * add them to mstatush. For now, we just don't support it. @@ -520,12 +547,10 @@ static RISCVException write_mstatus(CPURISCVState *env, int csrno, mstatus = (mstatus & ~mask) | (val & mask); - dirty = ((mstatus & MSTATUS_FS) == MSTATUS_FS) | - ((mstatus & MSTATUS_XS) == MSTATUS_XS); - if (riscv_cpu_is_32bit(env)) { - mstatus = set_field(mstatus, MSTATUS32_SD, dirty); - } else { - mstatus = set_field(mstatus, MSTATUS64_SD, dirty); + if (riscv_cpu_mxl(env) == MXL_RV64) { + /* SXL and UXL fields are for now read only */ + mstatus = set_field(mstatus, MSTATUS64_SXL, MXL_RV64); + mstatus = set_field(mstatus, MSTATUS64_UXL, MXL_RV64); } env->mstatus = mstatus; @@ -557,7 +582,22 @@ static RISCVException write_mstatush(CPURISCVState *env, int csrno, static RISCVException read_misa(CPURISCVState *env, int csrno, target_ulong *val) { - *val = env->misa; + target_ulong misa; + + switch (env->misa_mxl) { + case MXL_RV32: + misa = (target_ulong)MXL_RV32 << 30; + break; +#ifdef TARGET_RISCV64 + case MXL_RV64: + misa = (target_ulong)MXL_RV64 << 62; + break; +#endif + default: + g_assert_not_reached(); + } + + *val = misa | env->misa_ext; return RISCV_EXCP_NONE; } @@ -583,8 +623,13 @@ static RISCVException write_misa(CPURISCVState *env, int csrno, return RISCV_EXCP_NONE; } + /* + * misa.MXL writes are not supported by QEMU. + * Drop writes to those bits. 
+ */ + /* Mask extensions that are not supported by this hart */ - val &= env->misa_mask; + val &= env->misa_ext_mask; /* Mask extensions that are not supported by QEMU */ val &= (RVI | RVE | RVM | RVA | RVF | RVD | RVC | RVS | RVU); @@ -601,20 +646,14 @@ static RISCVException write_misa(CPURISCVState *env, int csrno, val &= ~RVC; } - /* misa.MXL writes are not supported by QEMU */ - if (riscv_cpu_is_32bit(env)) { - val = (env->misa & MISA32_MXL) | (val & ~MISA32_MXL); - } else { - val = (env->misa & MISA64_MXL) | (val & ~MISA64_MXL); + /* If nothing changed, do nothing. */ + if (val == env->misa_ext) { + return RISCV_EXCP_NONE; } /* flush translation cache */ - if (val != env->misa) { - tb_flush(env_cpu(env)); - } - - env->misa = val; - + tb_flush(env_cpu(env)); + env->misa_ext = val; return RISCV_EXCP_NONE; } @@ -781,13 +820,8 @@ static RISCVException read_sstatus(CPURISCVState *env, int csrno, { target_ulong mask = (sstatus_v1_10_mask); - if (riscv_cpu_is_32bit(env)) { - mask |= SSTATUS32_SD; - } else { - mask |= SSTATUS64_SD; - } - - *val = env->mstatus & mask; + /* TODO: Use SXL not MXL. */ + *val = add_status_sd(riscv_cpu_mxl(env), env->mstatus & mask); return RISCV_EXCP_NONE; } @@ -992,7 +1026,7 @@ static RISCVException write_satp(CPURISCVState *env, int csrno, return RISCV_EXCP_NONE; } - if (riscv_cpu_is_32bit(env)) { + if (riscv_cpu_mxl(env) == MXL_RV32) { vm = validate_vm(env, get_field(val, SATP32_MODE)); mask = (val ^ env->satp) & (SATP32_MODE | SATP32_ASID | SATP32_PPN); asid = (val ^ env->satp) & SATP32_ASID; @@ -1020,7 +1054,7 @@ static RISCVException read_hstatus(CPURISCVState *env, int csrno, target_ulong *val) { *val = env->hstatus; - if (!riscv_cpu_is_32bit(env)) { + if (riscv_cpu_mxl(env) != MXL_RV32) { /* We only support 64-bit VSXL */ *val = set_field(*val, HSTATUS_VSXL, 2); } @@ -1033,7 +1067,7 @@ static RISCVException write_hstatus(CPURISCVState *env, int csrno, target_ulong val) { env->hstatus = val; - if (!riscv_cpu_is_32bit(env) && get_field(val, HSTATUS_VSXL) != 2) { + if (riscv_cpu_mxl(env) != MXL_RV32 && get_field(val, HSTATUS_VSXL) != 2) { qemu_log_mask(LOG_UNIMP, "QEMU does not support mixed HSXLEN options."); } if (get_field(val, HSTATUS_VSBE) != 0) { @@ -1201,7 +1235,7 @@ static RISCVException write_htimedelta(CPURISCVState *env, int csrno, return RISCV_EXCP_ILLEGAL_INST; } - if (riscv_cpu_is_32bit(env)) { + if (riscv_cpu_mxl(env) == MXL_RV32) { env->htimedelta = deposit64(env->htimedelta, 0, 32, (uint64_t)val); } else { env->htimedelta = val; @@ -1401,6 +1435,268 @@ static RISCVException write_pmpaddr(CPURISCVState *env, int csrno, return RISCV_EXCP_NONE; } +/* + * Functions to access Pointer Masking feature registers + * We have to check if current priv lvl could modify + * csr in given mode + */ +static bool check_pm_current_disabled(CPURISCVState *env, int csrno) +{ + int csr_priv = get_field(csrno, 0x300); + int pm_current; + + /* + * If priv lvls differ that means we're accessing csr from higher priv lvl, + * so allow the access + */ + if (env->priv != csr_priv) { + return false; + } + switch (env->priv) { + case PRV_M: + pm_current = get_field(env->mmte, M_PM_CURRENT); + break; + case PRV_S: + pm_current = get_field(env->mmte, S_PM_CURRENT); + break; + case PRV_U: + pm_current = get_field(env->mmte, U_PM_CURRENT); + break; + default: + g_assert_not_reached(); + } + /* It's same priv lvl, so we allow to modify csr only if pm.current==1 */ + return !pm_current; +} + +static RISCVException read_mmte(CPURISCVState *env, int csrno, + target_ulong *val) 
+{ + *val = env->mmte & MMTE_MASK; + return RISCV_EXCP_NONE; +} + +static RISCVException write_mmte(CPURISCVState *env, int csrno, + target_ulong val) +{ + uint64_t mstatus; + target_ulong wpri_val = val & MMTE_MASK; + + if (val != wpri_val) { + qemu_log_mask(LOG_GUEST_ERROR, "%s" TARGET_FMT_lx " %s" TARGET_FMT_lx "\n", + "MMTE: WPRI violation written 0x", val, + "vs expected 0x", wpri_val); + } + /* for machine mode pm.current is hardwired to 1 */ + wpri_val |= MMTE_M_PM_CURRENT; + + /* hardwiring pm.instruction bit to 0, since it's not supported yet */ + wpri_val &= ~(MMTE_M_PM_INSN | MMTE_S_PM_INSN | MMTE_U_PM_INSN); + env->mmte = wpri_val | PM_EXT_DIRTY; + + /* Set XS and SD bits, since PM CSRs are dirty */ + mstatus = env->mstatus | MSTATUS_XS; + write_mstatus(env, csrno, mstatus); + return RISCV_EXCP_NONE; +} + +static RISCVException read_smte(CPURISCVState *env, int csrno, + target_ulong *val) +{ + *val = env->mmte & SMTE_MASK; + return RISCV_EXCP_NONE; +} + +static RISCVException write_smte(CPURISCVState *env, int csrno, + target_ulong val) +{ + target_ulong wpri_val = val & SMTE_MASK; + + if (val != wpri_val) { + qemu_log_mask(LOG_GUEST_ERROR, "%s" TARGET_FMT_lx " %s" TARGET_FMT_lx "\n", + "SMTE: WPRI violation written 0x", val, + "vs expected 0x", wpri_val); + } + + /* if pm.current==0 we can't modify current PM CSRs */ + if (check_pm_current_disabled(env, csrno)) { + return RISCV_EXCP_NONE; + } + + wpri_val |= (env->mmte & ~SMTE_MASK); + write_mmte(env, csrno, wpri_val); + return RISCV_EXCP_NONE; +} + +static RISCVException read_umte(CPURISCVState *env, int csrno, + target_ulong *val) +{ + *val = env->mmte & UMTE_MASK; + return RISCV_EXCP_NONE; +} + +static RISCVException write_umte(CPURISCVState *env, int csrno, + target_ulong val) +{ + target_ulong wpri_val = val & UMTE_MASK; + + if (val != wpri_val) { + qemu_log_mask(LOG_GUEST_ERROR, "%s" TARGET_FMT_lx " %s" TARGET_FMT_lx "\n", + "UMTE: WPRI violation written 0x", val, + "vs expected 0x", wpri_val); + } + + if (check_pm_current_disabled(env, csrno)) { + return RISCV_EXCP_NONE; + } + + wpri_val |= (env->mmte & ~UMTE_MASK); + write_mmte(env, csrno, wpri_val); + return RISCV_EXCP_NONE; +} + +static RISCVException read_mpmmask(CPURISCVState *env, int csrno, + target_ulong *val) +{ + *val = env->mpmmask; + return RISCV_EXCP_NONE; +} + +static RISCVException write_mpmmask(CPURISCVState *env, int csrno, + target_ulong val) +{ + uint64_t mstatus; + + env->mpmmask = val; + env->mmte |= PM_EXT_DIRTY; + + /* Set XS and SD bits, since PM CSRs are dirty */ + mstatus = env->mstatus | MSTATUS_XS; + write_mstatus(env, csrno, mstatus); + return RISCV_EXCP_NONE; +} + +static RISCVException read_spmmask(CPURISCVState *env, int csrno, + target_ulong *val) +{ + *val = env->spmmask; + return RISCV_EXCP_NONE; +} + +static RISCVException write_spmmask(CPURISCVState *env, int csrno, + target_ulong val) +{ + uint64_t mstatus; + + /* if pm.current==0 we can't modify current PM CSRs */ + if (check_pm_current_disabled(env, csrno)) { + return RISCV_EXCP_NONE; + } + env->spmmask = val; + env->mmte |= PM_EXT_DIRTY; + + /* Set XS and SD bits, since PM CSRs are dirty */ + mstatus = env->mstatus | MSTATUS_XS; + write_mstatus(env, csrno, mstatus); + return RISCV_EXCP_NONE; +} + +static RISCVException read_upmmask(CPURISCVState *env, int csrno, + target_ulong *val) +{ + *val = env->upmmask; + return RISCV_EXCP_NONE; +} + +static RISCVException write_upmmask(CPURISCVState *env, int csrno, + target_ulong val) +{ + uint64_t mstatus; + + /* if pm.current==0 we can't 
modify current PM CSRs */ + if (check_pm_current_disabled(env, csrno)) { + return RISCV_EXCP_NONE; + } + env->upmmask = val; + env->mmte |= PM_EXT_DIRTY; + + /* Set XS and SD bits, since PM CSRs are dirty */ + mstatus = env->mstatus | MSTATUS_XS; + write_mstatus(env, csrno, mstatus); + return RISCV_EXCP_NONE; +} + +static RISCVException read_mpmbase(CPURISCVState *env, int csrno, + target_ulong *val) +{ + *val = env->mpmbase; + return RISCV_EXCP_NONE; +} + +static RISCVException write_mpmbase(CPURISCVState *env, int csrno, + target_ulong val) +{ + uint64_t mstatus; + + env->mpmbase = val; + env->mmte |= PM_EXT_DIRTY; + + /* Set XS and SD bits, since PM CSRs are dirty */ + mstatus = env->mstatus | MSTATUS_XS; + write_mstatus(env, csrno, mstatus); + return RISCV_EXCP_NONE; +} + +static RISCVException read_spmbase(CPURISCVState *env, int csrno, + target_ulong *val) +{ + *val = env->spmbase; + return RISCV_EXCP_NONE; +} + +static RISCVException write_spmbase(CPURISCVState *env, int csrno, + target_ulong val) +{ + uint64_t mstatus; + + /* if pm.current==0 we can't modify current PM CSRs */ + if (check_pm_current_disabled(env, csrno)) { + return RISCV_EXCP_NONE; + } + env->spmbase = val; + env->mmte |= PM_EXT_DIRTY; + + /* Set XS and SD bits, since PM CSRs are dirty */ + mstatus = env->mstatus | MSTATUS_XS; + write_mstatus(env, csrno, mstatus); + return RISCV_EXCP_NONE; +} + +static RISCVException read_upmbase(CPURISCVState *env, int csrno, + target_ulong *val) +{ + *val = env->upmbase; + return RISCV_EXCP_NONE; +} + +static RISCVException write_upmbase(CPURISCVState *env, int csrno, + target_ulong val) +{ + uint64_t mstatus; + + /* if pm.current==0 we can't modify current PM CSRs */ + if (check_pm_current_disabled(env, csrno)) { + return RISCV_EXCP_NONE; + } + env->upmbase = val; + env->mmte |= PM_EXT_DIRTY; + + /* Set XS and SD bits, since PM CSRs are dirty */ + mstatus = env->mstatus | MSTATUS_XS; + write_mstatus(env, csrno, mstatus); + return RISCV_EXCP_NONE; +} + #endif /* @@ -1635,6 +1931,19 @@ riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = { [CSR_PMPADDR14] = { "pmpaddr14", pmp, read_pmpaddr, write_pmpaddr }, [CSR_PMPADDR15] = { "pmpaddr15", pmp, read_pmpaddr, write_pmpaddr }, + /* User Pointer Masking */ + [CSR_UMTE] = { "umte", pointer_masking, read_umte, write_umte }, + [CSR_UPMMASK] = { "upmmask", pointer_masking, read_upmmask, write_upmmask }, + [CSR_UPMBASE] = { "upmbase", pointer_masking, read_upmbase, write_upmbase }, + /* Machine Pointer Masking */ + [CSR_MMTE] = { "mmte", pointer_masking, read_mmte, write_mmte }, + [CSR_MPMMASK] = { "mpmmask", pointer_masking, read_mpmmask, write_mpmmask }, + [CSR_MPMBASE] = { "mpmbase", pointer_masking, read_mpmbase, write_mpmbase }, + /* Supervisor Pointer Masking */ + [CSR_SMTE] = { "smte", pointer_masking, read_smte, write_smte }, + [CSR_SPMMASK] = { "spmmask", pointer_masking, read_spmmask, write_spmmask }, + [CSR_SPMBASE] = { "spmbase", pointer_masking, read_spmbase, write_spmbase }, + /* Performance Counters */ [CSR_HPMCOUNTER3] = { "hpmcounter3", ctr, read_zero }, [CSR_HPMCOUNTER4] = { "hpmcounter4", ctr, read_zero }, diff --git a/target/riscv/fpu_helper.c b/target/riscv/fpu_helper.c index 8700516a14..d62f470900 100644 --- a/target/riscv/fpu_helper.c +++ b/target/riscv/fpu_helper.c @@ -174,14 +174,18 @@ uint64_t helper_fmin_s(CPURISCVState *env, uint64_t rs1, uint64_t rs2) { float32 frs1 = check_nanbox_s(rs1); float32 frs2 = check_nanbox_s(rs2); - return nanbox_s(float32_minnum(frs1, frs2, &env->fp_status)); + return 
nanbox_s(env->priv_ver < PRIV_VERSION_1_11_0 ? + float32_minnum(frs1, frs2, &env->fp_status) : + float32_minimum_number(frs1, frs2, &env->fp_status)); } uint64_t helper_fmax_s(CPURISCVState *env, uint64_t rs1, uint64_t rs2) { float32 frs1 = check_nanbox_s(rs1); float32 frs2 = check_nanbox_s(rs2); - return nanbox_s(float32_maxnum(frs1, frs2, &env->fp_status)); + return nanbox_s(env->priv_ver < PRIV_VERSION_1_11_0 ? + float32_maxnum(frs1, frs2, &env->fp_status) : + float32_maximum_number(frs1, frs2, &env->fp_status)); } uint64_t helper_fsqrt_s(CPURISCVState *env, uint64_t rs1) @@ -283,12 +287,16 @@ uint64_t helper_fdiv_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2) uint64_t helper_fmin_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2) { - return float64_minnum(frs1, frs2, &env->fp_status); + return env->priv_ver < PRIV_VERSION_1_11_0 ? + float64_minnum(frs1, frs2, &env->fp_status) : + float64_minimum_number(frs1, frs2, &env->fp_status); } uint64_t helper_fmax_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2) { - return float64_maxnum(frs1, frs2, &env->fp_status); + return env->priv_ver < PRIV_VERSION_1_11_0 ? + float64_maxnum(frs1, frs2, &env->fp_status) : + float64_maximum_number(frs1, frs2, &env->fp_status); } uint64_t helper_fcvt_s_d(CPURISCVState *env, uint64_t rs1) diff --git a/target/riscv/gdbstub.c b/target/riscv/gdbstub.c index a7a9c0b1fe..23429179e2 100644 --- a/target/riscv/gdbstub.c +++ b/target/riscv/gdbstub.c @@ -54,10 +54,10 @@ int riscv_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n) static int riscv_gdb_get_fpu(CPURISCVState *env, GByteArray *buf, int n) { if (n < 32) { - if (env->misa & RVD) { + if (env->misa_ext & RVD) { return gdb_get_reg64(buf, env->fpr[n]); } - if (env->misa & RVF) { + if (env->misa_ext & RVF) { return gdb_get_reg32(buf, env->fpr[n]); } /* there is hole between ft11 and fflags in fpu.xml */ @@ -161,7 +161,7 @@ static int riscv_gen_dynamic_csr_xml(CPUState *cs, int base_reg) CPURISCVState *env = &cpu->env; GString *s = g_string_new(NULL); riscv_csr_predicate_fn predicate; - int bitsize = riscv_cpu_is_32bit(env) ? 
32 : 64; + int bitsize = 16 << env->misa_mxl_max; int i; g_string_printf(s, ""); @@ -191,10 +191,10 @@ void riscv_cpu_register_gdb_regs_for_features(CPUState *cs) { RISCVCPU *cpu = RISCV_CPU(cs); CPURISCVState *env = &cpu->env; - if (env->misa & RVD) { + if (env->misa_ext & RVD) { gdb_register_coprocessor(cs, riscv_gdb_get_fpu, riscv_gdb_set_fpu, 36, "riscv-64bit-fpu.xml", 0); - } else if (env->misa & RVF) { + } else if (env->misa_ext & RVF) { gdb_register_coprocessor(cs, riscv_gdb_get_fpu, riscv_gdb_set_fpu, 36, "riscv-32bit-fpu.xml", 0); } diff --git a/target/riscv/helper.h b/target/riscv/helper.h index 460eee9988..c7a5376227 100644 --- a/target/riscv/helper.h +++ b/target/riscv/helper.h @@ -59,10 +59,8 @@ DEF_HELPER_FLAGS_2(fcvt_d_lu, TCG_CALL_NO_RWG, i64, env, tl) DEF_HELPER_FLAGS_1(fclass_d, TCG_CALL_NO_RWG_SE, tl, i64) /* Bitmanip */ -DEF_HELPER_FLAGS_2(grev, TCG_CALL_NO_RWG_SE, tl, tl, tl) -DEF_HELPER_FLAGS_2(grevw, TCG_CALL_NO_RWG_SE, tl, tl, tl) -DEF_HELPER_FLAGS_2(gorc, TCG_CALL_NO_RWG_SE, tl, tl, tl) -DEF_HELPER_FLAGS_2(gorcw, TCG_CALL_NO_RWG_SE, tl, tl, tl) +DEF_HELPER_FLAGS_2(clmul, TCG_CALL_NO_RWG_SE, tl, tl, tl) +DEF_HELPER_FLAGS_2(clmulr, TCG_CALL_NO_RWG_SE, tl, tl, tl) /* Special functions */ DEF_HELPER_2(csrr, tl, env, int) diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode index 2cd921d51c..2f251dac1b 100644 --- a/target/riscv/insn32.decode +++ b/target/riscv/insn32.decode @@ -660,76 +660,69 @@ vamomaxd_v 10100 . . ..... ..... 111 ..... 0101111 @r_wdvm vamominud_v 11000 . . ..... ..... 111 ..... 0101111 @r_wdvm vamomaxud_v 11100 . . ..... ..... 111 ..... 0101111 @r_wdvm -# *** RV32B Standard Extension *** -clz 011000 000000 ..... 001 ..... 0010011 @r2 -ctz 011000 000001 ..... 001 ..... 0010011 @r2 -cpop 011000 000010 ..... 001 ..... 0010011 @r2 -sext_b 011000 000100 ..... 001 ..... 0010011 @r2 -sext_h 011000 000101 ..... 001 ..... 0010011 @r2 - -andn 0100000 .......... 111 ..... 0110011 @r -orn 0100000 .......... 110 ..... 0110011 @r -xnor 0100000 .......... 100 ..... 0110011 @r -pack 0000100 .......... 100 ..... 0110011 @r -packu 0100100 .......... 100 ..... 0110011 @r -packh 0000100 .......... 111 ..... 0110011 @r -min 0000101 .......... 100 ..... 0110011 @r -minu 0000101 .......... 101 ..... 0110011 @r -max 0000101 .......... 110 ..... 0110011 @r -maxu 0000101 .......... 111 ..... 0110011 @r -bset 0010100 .......... 001 ..... 0110011 @r -bclr 0100100 .......... 001 ..... 0110011 @r -binv 0110100 .......... 001 ..... 0110011 @r -bext 0100100 .......... 101 ..... 0110011 @r -slo 0010000 .......... 001 ..... 0110011 @r -sro 0010000 .......... 101 ..... 0110011 @r -ror 0110000 .......... 101 ..... 0110011 @r -rol 0110000 .......... 001 ..... 0110011 @r -grev 0110100 .......... 101 ..... 0110011 @r -gorc 0010100 .......... 101 ..... 0110011 @r +# *** RV32 Zba Standard Extension *** sh1add 0010000 .......... 010 ..... 0110011 @r sh2add 0010000 .......... 100 ..... 0110011 @r sh3add 0010000 .......... 110 ..... 0110011 @r -bseti 00101. ........... 001 ..... 0010011 @sh -bclri 01001. ........... 001 ..... 0010011 @sh -binvi 01101. ........... 001 ..... 0010011 @sh -bexti 01001. ........... 101 ..... 0010011 @sh -sloi 00100. ........... 001 ..... 0010011 @sh -sroi 00100. ........... 101 ..... 0010011 @sh -rori 01100. ........... 101 ..... 0010011 @sh -grevi 01101. ........... 101 ..... 0010011 @sh -gorci 00101. ........... 101 ..... 0010011 @sh - -# *** RV64B Standard Extension (in addition to RV32B) *** -clzw 0110000 00000 ..... 001 ..... 
0011011 @r2 -ctzw 0110000 00001 ..... 001 ..... 0011011 @r2 -cpopw 0110000 00010 ..... 001 ..... 0011011 @r2 - -packw 0000100 .......... 100 ..... 0111011 @r -packuw 0100100 .......... 100 ..... 0111011 @r -bsetw 0010100 .......... 001 ..... 0111011 @r -bclrw 0100100 .......... 001 ..... 0111011 @r -binvw 0110100 .......... 001 ..... 0111011 @r -bextw 0100100 .......... 101 ..... 0111011 @r -slow 0010000 .......... 001 ..... 0111011 @r -srow 0010000 .......... 101 ..... 0111011 @r -rorw 0110000 .......... 101 ..... 0111011 @r -rolw 0110000 .......... 001 ..... 0111011 @r -grevw 0110100 .......... 101 ..... 0111011 @r -gorcw 0010100 .......... 101 ..... 0111011 @r +# *** RV64 Zba Standard Extension (in addition to RV32 Zba) *** +add_uw 0000100 .......... 000 ..... 0111011 @r sh1add_uw 0010000 .......... 010 ..... 0111011 @r sh2add_uw 0010000 .......... 100 ..... 0111011 @r sh3add_uw 0010000 .......... 110 ..... 0111011 @r -add_uw 0000100 .......... 000 ..... 0111011 @r +slli_uw 00001 ............ 001 ..... 0011011 @sh -bsetiw 0010100 .......... 001 ..... 0011011 @sh5 -bclriw 0100100 .......... 001 ..... 0011011 @sh5 -binviw 0110100 .......... 001 ..... 0011011 @sh5 -sloiw 0010000 .......... 001 ..... 0011011 @sh5 -sroiw 0010000 .......... 101 ..... 0011011 @sh5 +# *** RV32 Zbb Standard Extension *** +andn 0100000 .......... 111 ..... 0110011 @r +clz 011000 000000 ..... 001 ..... 0010011 @r2 +cpop 011000 000010 ..... 001 ..... 0010011 @r2 +ctz 011000 000001 ..... 001 ..... 0010011 @r2 +max 0000101 .......... 110 ..... 0110011 @r +maxu 0000101 .......... 111 ..... 0110011 @r +min 0000101 .......... 100 ..... 0110011 @r +minu 0000101 .......... 101 ..... 0110011 @r +orc_b 001010 000111 ..... 101 ..... 0010011 @r2 +orn 0100000 .......... 110 ..... 0110011 @r +# The encoding for rev8 differs between RV32 and RV64. +# rev8_32 denotes the RV32 variant. +rev8_32 011010 011000 ..... 101 ..... 0010011 @r2 +rol 0110000 .......... 001 ..... 0110011 @r +ror 0110000 .......... 101 ..... 0110011 @r +rori 01100 ............ 101 ..... 0010011 @sh +sext_b 011000 000100 ..... 001 ..... 0010011 @r2 +sext_h 011000 000101 ..... 001 ..... 0010011 @r2 +xnor 0100000 .......... 100 ..... 0110011 @r +# The encoding for zext.h differs between RV32 and RV64. +# zext_h_32 denotes the RV32 variant. +zext_h_32 0000100 00000 ..... 100 ..... 0110011 @r2 + +# *** RV64 Zbb Standard Extension (in addition to RV32 Zbb) *** +clzw 0110000 00000 ..... 001 ..... 0011011 @r2 +ctzw 0110000 00001 ..... 001 ..... 0011011 @r2 +cpopw 0110000 00010 ..... 001 ..... 0011011 @r2 +# The encoding for rev8 differs between RV32 and RV64. +# When executing on RV64, the encoding used in RV32 is an illegal +# instruction, so we use different handler functions to differentiate. +rev8_64 011010 111000 ..... 101 ..... 0010011 @r2 +rolw 0110000 .......... 001 ..... 0111011 @r roriw 0110000 .......... 101 ..... 0011011 @sh5 -greviw 0110100 .......... 101 ..... 0011011 @sh5 -gorciw 0010100 .......... 101 ..... 0011011 @sh5 +rorw 0110000 .......... 101 ..... 0111011 @r +# The encoding for zext.h differs between RV32 and RV64. +# When executing on RV64, the encoding used in RV32 is an illegal +# instruction, so we use different handler functions to differentiate. +zext_h_64 0000100 00000 ..... 100 ..... 0111011 @r2 -slli_uw 00001. ........... 001 ..... 0011011 @sh +# *** RV32 Zbc Standard Extension *** +clmul 0000101 .......... 001 ..... 0110011 @r +clmulh 0000101 .......... 011 ..... 0110011 @r +clmulr 0000101 .......... 010 ..... 
0110011 @r + +# *** RV32 Zbs Standard Extension *** +bclr 0100100 .......... 001 ..... 0110011 @r +bclri 01001. ........... 001 ..... 0010011 @sh +bext 0100100 .......... 101 ..... 0110011 @r +bexti 01001. ........... 101 ..... 0010011 @sh +binv 0110100 .......... 001 ..... 0110011 @r +binvi 01101. ........... 001 ..... 0010011 @sh +bset 0010100 .......... 001 ..... 0110011 @r +bseti 00101. ........... 001 ..... 0010011 @sh diff --git a/target/riscv/insn_trans/trans_privileged.c.inc b/target/riscv/insn_trans/trans_privileged.c.inc index 32312be202..75c6ef80a6 100644 --- a/target/riscv/insn_trans/trans_privileged.c.inc +++ b/target/riscv/insn_trans/trans_privileged.c.inc @@ -22,8 +22,6 @@ static bool trans_ecall(DisasContext *ctx, arg_ecall *a) { /* always generates U-level ECALL, fixed in do_interrupt handler */ generate_exception(ctx, RISCV_EXCP_U_ECALL); - exit_tb(ctx); /* no chaining */ - ctx->base.is_jmp = DISAS_NORETURN; return true; } @@ -60,13 +58,11 @@ static bool trans_ebreak(DisasContext *ctx, arg_ebreak *a) post = opcode_at(&ctx->base, post_addr); } - if (pre == 0x01f01013 && ebreak == 0x00100073 && post == 0x40705013) { + if (pre == 0x01f01013 && ebreak == 0x00100073 && post == 0x40705013) { generate_exception(ctx, RISCV_EXCP_SEMIHOST); } else { generate_exception(ctx, RISCV_EXCP_BREAKPOINT); } - exit_tb(ctx); /* no chaining */ - ctx->base.is_jmp = DISAS_NORETURN; return true; } @@ -82,7 +78,7 @@ static bool trans_sret(DisasContext *ctx, arg_sret *a) if (has_ext(ctx, RVS)) { gen_helper_sret(cpu_pc, cpu_env, cpu_pc); - exit_tb(ctx); /* no chaining */ + tcg_gen_exit_tb(NULL, 0); /* no chaining */ ctx->base.is_jmp = DISAS_NORETURN; } else { return false; @@ -98,7 +94,7 @@ static bool trans_mret(DisasContext *ctx, arg_mret *a) #ifndef CONFIG_USER_ONLY tcg_gen_movi_tl(cpu_pc, ctx->base.pc_next); gen_helper_mret(cpu_pc, cpu_env, cpu_pc); - exit_tb(ctx); /* no chaining */ + tcg_gen_exit_tb(NULL, 0); /* no chaining */ ctx->base.is_jmp = DISAS_NORETURN; return true; #else diff --git a/target/riscv/insn_trans/trans_rva.c.inc b/target/riscv/insn_trans/trans_rva.c.inc index 6ea07d89b0..40fe132b04 100644 --- a/target/riscv/insn_trans/trans_rva.c.inc +++ b/target/riscv/insn_trans/trans_rva.c.inc @@ -25,6 +25,7 @@ static bool gen_lr(DisasContext *ctx, arg_atomic *a, MemOp mop) if (a->rl) { tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL); } + src1 = gen_pm_adjust_address(ctx, src1); tcg_gen_qemu_ld_tl(load_val, src1, ctx->mem_idx, mop); if (a->aq) { tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ); @@ -44,6 +45,7 @@ static bool gen_sc(DisasContext *ctx, arg_atomic *a, MemOp mop) TCGLabel *l2 = gen_new_label(); src1 = get_gpr(ctx, a->rs1, EXT_ZERO); + src1 = gen_pm_adjust_address(ctx, src1); tcg_gen_brcond_tl(TCG_COND_NE, load_res, src1, l1); /* @@ -84,6 +86,7 @@ static bool gen_amo(DisasContext *ctx, arg_atomic *a, TCGv src1 = get_gpr(ctx, a->rs1, EXT_NONE); TCGv src2 = get_gpr(ctx, a->rs2, EXT_NONE); + src1 = gen_pm_adjust_address(ctx, src1); func(dest, src1, src2, ctx->mem_idx, mop); gen_set_gpr(ctx, a->rd, dest); diff --git a/target/riscv/insn_trans/trans_rvb.c.inc b/target/riscv/insn_trans/trans_rvb.c.inc index b72e76255c..c8d31907c5 100644 --- a/target/riscv/insn_trans/trans_rvb.c.inc +++ b/target/riscv/insn_trans/trans_rvb.c.inc @@ -1,8 +1,9 @@ /* - * RISC-V translation routines for the RVB Standard Extension. + * RISC-V translation routines for the Zb[abcs] Standard Extension. 
* * Copyright (c) 2020 Kito Cheng, kito.cheng@sifive.com * Copyright (c) 2020 Frank Chang, frank.chang@sifive.com + * Copyright (c) 2021 Philipp Tomsich, philipp.tomsich@vrull.eu * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -17,16 +18,47 @@ * this program. If not, see . */ +#define REQUIRE_ZBA(ctx) do { \ + if (!RISCV_CPU(ctx->cs)->cfg.ext_zba) { \ + return false; \ + } \ +} while (0) + +#define REQUIRE_ZBB(ctx) do { \ + if (!RISCV_CPU(ctx->cs)->cfg.ext_zbb) { \ + return false; \ + } \ +} while (0) + +#define REQUIRE_ZBC(ctx) do { \ + if (!RISCV_CPU(ctx->cs)->cfg.ext_zbc) { \ + return false; \ + } \ +} while (0) + +#define REQUIRE_ZBS(ctx) do { \ + if (!RISCV_CPU(ctx->cs)->cfg.ext_zbs) { \ + return false; \ + } \ +} while (0) static void gen_clz(TCGv ret, TCGv arg1) { tcg_gen_clzi_tl(ret, arg1, TARGET_LONG_BITS); } +static void gen_clzw(TCGv ret, TCGv arg1) +{ + TCGv t = tcg_temp_new(); + tcg_gen_shli_tl(t, arg1, 32); + tcg_gen_clzi_tl(ret, t, 32); + tcg_temp_free(t); +} + static bool trans_clz(DisasContext *ctx, arg_clz *a) { - REQUIRE_EXT(ctx, RVB); - return gen_unary(ctx, a, EXT_ZERO, gen_clz); + REQUIRE_ZBB(ctx); + return gen_unary_per_ol(ctx, a, EXT_NONE, gen_clz, gen_clzw); } static void gen_ctz(TCGv ret, TCGv arg1) @@ -34,110 +66,74 @@ static void gen_ctz(TCGv ret, TCGv arg1) tcg_gen_ctzi_tl(ret, arg1, TARGET_LONG_BITS); } +static void gen_ctzw(TCGv ret, TCGv arg1) +{ + tcg_gen_ctzi_tl(ret, arg1, 32); +} + static bool trans_ctz(DisasContext *ctx, arg_ctz *a) { - REQUIRE_EXT(ctx, RVB); - return gen_unary(ctx, a, EXT_ZERO, gen_ctz); + REQUIRE_ZBB(ctx); + return gen_unary_per_ol(ctx, a, EXT_ZERO, gen_ctz, gen_ctzw); } static bool trans_cpop(DisasContext *ctx, arg_cpop *a) { - REQUIRE_EXT(ctx, RVB); + REQUIRE_ZBB(ctx); return gen_unary(ctx, a, EXT_ZERO, tcg_gen_ctpop_tl); } static bool trans_andn(DisasContext *ctx, arg_andn *a) { - REQUIRE_EXT(ctx, RVB); + REQUIRE_ZBB(ctx); return gen_arith(ctx, a, EXT_NONE, tcg_gen_andc_tl); } static bool trans_orn(DisasContext *ctx, arg_orn *a) { - REQUIRE_EXT(ctx, RVB); + REQUIRE_ZBB(ctx); return gen_arith(ctx, a, EXT_NONE, tcg_gen_orc_tl); } static bool trans_xnor(DisasContext *ctx, arg_xnor *a) { - REQUIRE_EXT(ctx, RVB); + REQUIRE_ZBB(ctx); return gen_arith(ctx, a, EXT_NONE, tcg_gen_eqv_tl); } -static void gen_pack(TCGv ret, TCGv arg1, TCGv arg2) -{ - tcg_gen_deposit_tl(ret, arg1, arg2, - TARGET_LONG_BITS / 2, - TARGET_LONG_BITS / 2); -} - -static bool trans_pack(DisasContext *ctx, arg_pack *a) -{ - REQUIRE_EXT(ctx, RVB); - return gen_arith(ctx, a, EXT_NONE, gen_pack); -} - -static void gen_packu(TCGv ret, TCGv arg1, TCGv arg2) -{ - TCGv t = tcg_temp_new(); - tcg_gen_shri_tl(t, arg1, TARGET_LONG_BITS / 2); - tcg_gen_deposit_tl(ret, arg2, t, 0, TARGET_LONG_BITS / 2); - tcg_temp_free(t); -} - -static bool trans_packu(DisasContext *ctx, arg_packu *a) -{ - REQUIRE_EXT(ctx, RVB); - return gen_arith(ctx, a, EXT_NONE, gen_packu); -} - -static void gen_packh(TCGv ret, TCGv arg1, TCGv arg2) -{ - TCGv t = tcg_temp_new(); - tcg_gen_ext8u_tl(t, arg2); - tcg_gen_deposit_tl(ret, arg1, t, 8, TARGET_LONG_BITS - 8); - tcg_temp_free(t); -} - -static bool trans_packh(DisasContext *ctx, arg_packh *a) -{ - REQUIRE_EXT(ctx, RVB); - return gen_arith(ctx, a, EXT_NONE, gen_packh); -} - static bool trans_min(DisasContext *ctx, arg_min *a) { - REQUIRE_EXT(ctx, RVB); + REQUIRE_ZBB(ctx); return gen_arith(ctx, a, EXT_SIGN, tcg_gen_smin_tl); } static bool trans_max(DisasContext 
*ctx, arg_max *a) { - REQUIRE_EXT(ctx, RVB); + REQUIRE_ZBB(ctx); return gen_arith(ctx, a, EXT_SIGN, tcg_gen_smax_tl); } static bool trans_minu(DisasContext *ctx, arg_minu *a) { - REQUIRE_EXT(ctx, RVB); + REQUIRE_ZBB(ctx); return gen_arith(ctx, a, EXT_SIGN, tcg_gen_umin_tl); } static bool trans_maxu(DisasContext *ctx, arg_maxu *a) { - REQUIRE_EXT(ctx, RVB); + REQUIRE_ZBB(ctx); return gen_arith(ctx, a, EXT_SIGN, tcg_gen_umax_tl); } static bool trans_sext_b(DisasContext *ctx, arg_sext_b *a) { - REQUIRE_EXT(ctx, RVB); + REQUIRE_ZBB(ctx); return gen_unary(ctx, a, EXT_NONE, tcg_gen_ext8s_tl); } static bool trans_sext_h(DisasContext *ctx, arg_sext_h *a) { - REQUIRE_EXT(ctx, RVB); + REQUIRE_ZBB(ctx); return gen_unary(ctx, a, EXT_NONE, tcg_gen_ext16s_tl); } @@ -159,13 +155,13 @@ static void gen_bset(TCGv ret, TCGv arg1, TCGv shamt) static bool trans_bset(DisasContext *ctx, arg_bset *a) { - REQUIRE_EXT(ctx, RVB); + REQUIRE_ZBS(ctx); return gen_shift(ctx, a, EXT_NONE, gen_bset); } static bool trans_bseti(DisasContext *ctx, arg_bseti *a) { - REQUIRE_EXT(ctx, RVB); + REQUIRE_ZBS(ctx); return gen_shift_imm_tl(ctx, a, EXT_NONE, gen_bset); } @@ -181,13 +177,13 @@ static void gen_bclr(TCGv ret, TCGv arg1, TCGv shamt) static bool trans_bclr(DisasContext *ctx, arg_bclr *a) { - REQUIRE_EXT(ctx, RVB); + REQUIRE_ZBS(ctx); return gen_shift(ctx, a, EXT_NONE, gen_bclr); } static bool trans_bclri(DisasContext *ctx, arg_bclri *a) { - REQUIRE_EXT(ctx, RVB); + REQUIRE_ZBS(ctx); return gen_shift_imm_tl(ctx, a, EXT_NONE, gen_bclr); } @@ -203,13 +199,13 @@ static void gen_binv(TCGv ret, TCGv arg1, TCGv shamt) static bool trans_binv(DisasContext *ctx, arg_binv *a) { - REQUIRE_EXT(ctx, RVB); + REQUIRE_ZBS(ctx); return gen_shift(ctx, a, EXT_NONE, gen_binv); } static bool trans_binvi(DisasContext *ctx, arg_binvi *a) { - REQUIRE_EXT(ctx, RVB); + REQUIRE_ZBS(ctx); return gen_shift_imm_tl(ctx, a, EXT_NONE, gen_binv); } @@ -221,285 +217,16 @@ static void gen_bext(TCGv ret, TCGv arg1, TCGv shamt) static bool trans_bext(DisasContext *ctx, arg_bext *a) { - REQUIRE_EXT(ctx, RVB); + REQUIRE_ZBS(ctx); return gen_shift(ctx, a, EXT_NONE, gen_bext); } static bool trans_bexti(DisasContext *ctx, arg_bexti *a) { - REQUIRE_EXT(ctx, RVB); + REQUIRE_ZBS(ctx); return gen_shift_imm_tl(ctx, a, EXT_NONE, gen_bext); } -static void gen_slo(TCGv ret, TCGv arg1, TCGv arg2) -{ - tcg_gen_not_tl(ret, arg1); - tcg_gen_shl_tl(ret, ret, arg2); - tcg_gen_not_tl(ret, ret); -} - -static bool trans_slo(DisasContext *ctx, arg_slo *a) -{ - REQUIRE_EXT(ctx, RVB); - return gen_shift(ctx, a, EXT_NONE, gen_slo); -} - -static bool trans_sloi(DisasContext *ctx, arg_sloi *a) -{ - REQUIRE_EXT(ctx, RVB); - return gen_shift_imm_tl(ctx, a, EXT_NONE, gen_slo); -} - -static void gen_sro(TCGv ret, TCGv arg1, TCGv arg2) -{ - tcg_gen_not_tl(ret, arg1); - tcg_gen_shr_tl(ret, ret, arg2); - tcg_gen_not_tl(ret, ret); -} - -static bool trans_sro(DisasContext *ctx, arg_sro *a) -{ - REQUIRE_EXT(ctx, RVB); - return gen_shift(ctx, a, EXT_ZERO, gen_sro); -} - -static bool trans_sroi(DisasContext *ctx, arg_sroi *a) -{ - REQUIRE_EXT(ctx, RVB); - return gen_shift_imm_tl(ctx, a, EXT_ZERO, gen_sro); -} - -static bool trans_ror(DisasContext *ctx, arg_ror *a) -{ - REQUIRE_EXT(ctx, RVB); - return gen_shift(ctx, a, EXT_NONE, tcg_gen_rotr_tl); -} - -static bool trans_rori(DisasContext *ctx, arg_rori *a) -{ - REQUIRE_EXT(ctx, RVB); - return gen_shift_imm_fn(ctx, a, EXT_NONE, tcg_gen_rotri_tl); -} - -static bool trans_rol(DisasContext *ctx, arg_rol *a) -{ - REQUIRE_EXT(ctx, RVB); - return 
gen_shift(ctx, a, EXT_NONE, tcg_gen_rotl_tl); -} - -static bool trans_grev(DisasContext *ctx, arg_grev *a) -{ - REQUIRE_EXT(ctx, RVB); - return gen_shift(ctx, a, EXT_NONE, gen_helper_grev); -} - -static void gen_grevi(TCGv dest, TCGv src, target_long shamt) -{ - if (shamt == TARGET_LONG_BITS - 8) { - /* rev8, byte swaps */ - tcg_gen_bswap_tl(dest, src); - } else { - gen_helper_grev(dest, src, tcg_constant_tl(shamt)); - } -} - -static bool trans_grevi(DisasContext *ctx, arg_grevi *a) -{ - REQUIRE_EXT(ctx, RVB); - return gen_shift_imm_fn(ctx, a, EXT_NONE, gen_grevi); -} - -static bool trans_gorc(DisasContext *ctx, arg_gorc *a) -{ - REQUIRE_EXT(ctx, RVB); - return gen_shift(ctx, a, EXT_ZERO, gen_helper_gorc); -} - -static bool trans_gorci(DisasContext *ctx, arg_gorci *a) -{ - REQUIRE_EXT(ctx, RVB); - return gen_shift_imm_tl(ctx, a, EXT_ZERO, gen_helper_gorc); -} - -#define GEN_SHADD(SHAMT) \ -static void gen_sh##SHAMT##add(TCGv ret, TCGv arg1, TCGv arg2) \ -{ \ - TCGv t = tcg_temp_new(); \ - \ - tcg_gen_shli_tl(t, arg1, SHAMT); \ - tcg_gen_add_tl(ret, t, arg2); \ - \ - tcg_temp_free(t); \ -} - -GEN_SHADD(1) -GEN_SHADD(2) -GEN_SHADD(3) - -#define GEN_TRANS_SHADD(SHAMT) \ -static bool trans_sh##SHAMT##add(DisasContext *ctx, arg_sh##SHAMT##add *a) \ -{ \ - REQUIRE_EXT(ctx, RVB); \ - return gen_arith(ctx, a, EXT_NONE, gen_sh##SHAMT##add); \ -} - -GEN_TRANS_SHADD(1) -GEN_TRANS_SHADD(2) -GEN_TRANS_SHADD(3) - -static void gen_clzw(TCGv ret, TCGv arg1) -{ - tcg_gen_clzi_tl(ret, ret, 64); - tcg_gen_subi_tl(ret, ret, 32); -} - -static bool trans_clzw(DisasContext *ctx, arg_clzw *a) -{ - REQUIRE_64BIT(ctx); - REQUIRE_EXT(ctx, RVB); - return gen_unary(ctx, a, EXT_ZERO, gen_clzw); -} - -static void gen_ctzw(TCGv ret, TCGv arg1) -{ - tcg_gen_ori_tl(ret, arg1, (target_ulong)MAKE_64BIT_MASK(32, 32)); - tcg_gen_ctzi_tl(ret, ret, 64); -} - -static bool trans_ctzw(DisasContext *ctx, arg_ctzw *a) -{ - REQUIRE_64BIT(ctx); - REQUIRE_EXT(ctx, RVB); - return gen_unary(ctx, a, EXT_NONE, gen_ctzw); -} - -static bool trans_cpopw(DisasContext *ctx, arg_cpopw *a) -{ - REQUIRE_64BIT(ctx); - REQUIRE_EXT(ctx, RVB); - ctx->w = true; - return gen_unary(ctx, a, EXT_ZERO, tcg_gen_ctpop_tl); -} - -static void gen_packw(TCGv ret, TCGv arg1, TCGv arg2) -{ - TCGv t = tcg_temp_new(); - tcg_gen_ext16s_tl(t, arg2); - tcg_gen_deposit_tl(ret, arg1, t, 16, 48); - tcg_temp_free(t); -} - -static bool trans_packw(DisasContext *ctx, arg_packw *a) -{ - REQUIRE_64BIT(ctx); - REQUIRE_EXT(ctx, RVB); - return gen_arith(ctx, a, EXT_NONE, gen_packw); -} - -static void gen_packuw(TCGv ret, TCGv arg1, TCGv arg2) -{ - TCGv t = tcg_temp_new(); - tcg_gen_shri_tl(t, arg1, 16); - tcg_gen_deposit_tl(ret, arg2, t, 0, 16); - tcg_gen_ext32s_tl(ret, ret); - tcg_temp_free(t); -} - -static bool trans_packuw(DisasContext *ctx, arg_packuw *a) -{ - REQUIRE_64BIT(ctx); - REQUIRE_EXT(ctx, RVB); - return gen_arith(ctx, a, EXT_NONE, gen_packuw); -} - -static bool trans_bsetw(DisasContext *ctx, arg_bsetw *a) -{ - REQUIRE_64BIT(ctx); - REQUIRE_EXT(ctx, RVB); - ctx->w = true; - return gen_shift(ctx, a, EXT_NONE, gen_bset); -} - -static bool trans_bsetiw(DisasContext *ctx, arg_bsetiw *a) -{ - REQUIRE_64BIT(ctx); - REQUIRE_EXT(ctx, RVB); - ctx->w = true; - return gen_shift_imm_tl(ctx, a, EXT_NONE, gen_bset); -} - -static bool trans_bclrw(DisasContext *ctx, arg_bclrw *a) -{ - REQUIRE_64BIT(ctx); - REQUIRE_EXT(ctx, RVB); - ctx->w = true; - return gen_shift(ctx, a, EXT_NONE, gen_bclr); -} - -static bool trans_bclriw(DisasContext *ctx, arg_bclriw *a) -{ - REQUIRE_64BIT(ctx); - 
REQUIRE_EXT(ctx, RVB); - ctx->w = true; - return gen_shift_imm_tl(ctx, a, EXT_NONE, gen_bclr); -} - -static bool trans_binvw(DisasContext *ctx, arg_binvw *a) -{ - REQUIRE_64BIT(ctx); - REQUIRE_EXT(ctx, RVB); - ctx->w = true; - return gen_shift(ctx, a, EXT_NONE, gen_binv); -} - -static bool trans_binviw(DisasContext *ctx, arg_binviw *a) -{ - REQUIRE_64BIT(ctx); - REQUIRE_EXT(ctx, RVB); - ctx->w = true; - return gen_shift_imm_tl(ctx, a, EXT_NONE, gen_binv); -} - -static bool trans_bextw(DisasContext *ctx, arg_bextw *a) -{ - REQUIRE_64BIT(ctx); - REQUIRE_EXT(ctx, RVB); - ctx->w = true; - return gen_shift(ctx, a, EXT_NONE, gen_bext); -} - -static bool trans_slow(DisasContext *ctx, arg_slow *a) -{ - REQUIRE_64BIT(ctx); - REQUIRE_EXT(ctx, RVB); - ctx->w = true; - return gen_shift(ctx, a, EXT_NONE, gen_slo); -} - -static bool trans_sloiw(DisasContext *ctx, arg_sloiw *a) -{ - REQUIRE_64BIT(ctx); - REQUIRE_EXT(ctx, RVB); - ctx->w = true; - return gen_shift_imm_tl(ctx, a, EXT_NONE, gen_slo); -} - -static bool trans_srow(DisasContext *ctx, arg_srow *a) -{ - REQUIRE_64BIT(ctx); - REQUIRE_EXT(ctx, RVB); - ctx->w = true; - return gen_shift(ctx, a, EXT_ZERO, gen_sro); -} - -static bool trans_sroiw(DisasContext *ctx, arg_sroiw *a) -{ - REQUIRE_64BIT(ctx); - REQUIRE_EXT(ctx, RVB); - ctx->w = true; - return gen_shift_imm_tl(ctx, a, EXT_ZERO, gen_sro); -} - static void gen_rorw(TCGv ret, TCGv arg1, TCGv arg2) { TCGv_i32 t1 = tcg_temp_new_i32(); @@ -518,20 +245,28 @@ static void gen_rorw(TCGv ret, TCGv arg1, TCGv arg2) tcg_temp_free_i32(t2); } -static bool trans_rorw(DisasContext *ctx, arg_rorw *a) +static bool trans_ror(DisasContext *ctx, arg_ror *a) { - REQUIRE_64BIT(ctx); - REQUIRE_EXT(ctx, RVB); - ctx->w = true; - return gen_shift(ctx, a, EXT_NONE, gen_rorw); + REQUIRE_ZBB(ctx); + return gen_shift_per_ol(ctx, a, EXT_NONE, tcg_gen_rotr_tl, gen_rorw); } -static bool trans_roriw(DisasContext *ctx, arg_roriw *a) +static void gen_roriw(TCGv ret, TCGv arg1, target_long shamt) { - REQUIRE_64BIT(ctx); - REQUIRE_EXT(ctx, RVB); - ctx->w = true; - return gen_shift_imm_tl(ctx, a, EXT_NONE, gen_rorw); + TCGv_i32 t1 = tcg_temp_new_i32(); + + tcg_gen_trunc_tl_i32(t1, arg1); + tcg_gen_rotri_i32(t1, t1, shamt); + tcg_gen_ext_i32_tl(ret, t1); + + tcg_temp_free_i32(t1); +} + +static bool trans_rori(DisasContext *ctx, arg_rori *a) +{ + REQUIRE_ZBB(ctx); + return gen_shift_imm_fn_per_ol(ctx, a, EXT_NONE, + tcg_gen_rotri_tl, gen_roriw); } static void gen_rolw(TCGv ret, TCGv arg1, TCGv arg2) @@ -552,46 +287,143 @@ static void gen_rolw(TCGv ret, TCGv arg1, TCGv arg2) tcg_temp_free_i32(t2); } +static bool trans_rol(DisasContext *ctx, arg_rol *a) +{ + REQUIRE_ZBB(ctx); + return gen_shift_per_ol(ctx, a, EXT_NONE, tcg_gen_rotl_tl, gen_rolw); +} + +static void gen_rev8_32(TCGv ret, TCGv src1) +{ + tcg_gen_bswap32_tl(ret, src1, TCG_BSWAP_OS); +} + +static bool trans_rev8_32(DisasContext *ctx, arg_rev8_32 *a) +{ + REQUIRE_32BIT(ctx); + REQUIRE_ZBB(ctx); + return gen_unary(ctx, a, EXT_NONE, gen_rev8_32); +} + +static bool trans_rev8_64(DisasContext *ctx, arg_rev8_64 *a) +{ + REQUIRE_64BIT(ctx); + REQUIRE_ZBB(ctx); + return gen_unary(ctx, a, EXT_NONE, tcg_gen_bswap_tl); +} + +static void gen_orc_b(TCGv ret, TCGv source1) +{ + TCGv tmp = tcg_temp_new(); + TCGv low7 = tcg_constant_tl(dup_const_tl(MO_8, 0x7f)); + + /* Set msb in each byte if the byte was non-zero. 
*/ + tcg_gen_and_tl(tmp, source1, low7); + tcg_gen_add_tl(tmp, tmp, low7); + tcg_gen_or_tl(tmp, tmp, source1); + + /* Extract the msb to the lsb in each byte */ + tcg_gen_andc_tl(tmp, tmp, low7); + tcg_gen_shri_tl(tmp, tmp, 7); + + /* Replicate the lsb of each byte across the byte. */ + tcg_gen_muli_tl(ret, tmp, 0xff); + + tcg_temp_free(tmp); +} + +static bool trans_orc_b(DisasContext *ctx, arg_orc_b *a) +{ + REQUIRE_ZBB(ctx); + return gen_unary(ctx, a, EXT_ZERO, gen_orc_b); +} + +#define GEN_SHADD(SHAMT) \ +static void gen_sh##SHAMT##add(TCGv ret, TCGv arg1, TCGv arg2) \ +{ \ + TCGv t = tcg_temp_new(); \ + \ + tcg_gen_shli_tl(t, arg1, SHAMT); \ + tcg_gen_add_tl(ret, t, arg2); \ + \ + tcg_temp_free(t); \ +} + +GEN_SHADD(1) +GEN_SHADD(2) +GEN_SHADD(3) + +#define GEN_TRANS_SHADD(SHAMT) \ +static bool trans_sh##SHAMT##add(DisasContext *ctx, arg_sh##SHAMT##add *a) \ +{ \ + REQUIRE_ZBA(ctx); \ + return gen_arith(ctx, a, EXT_NONE, gen_sh##SHAMT##add); \ +} + +GEN_TRANS_SHADD(1) +GEN_TRANS_SHADD(2) +GEN_TRANS_SHADD(3) + +static bool trans_zext_h_32(DisasContext *ctx, arg_zext_h_32 *a) +{ + REQUIRE_32BIT(ctx); + REQUIRE_ZBB(ctx); + return gen_unary(ctx, a, EXT_NONE, tcg_gen_ext16u_tl); +} + +static bool trans_zext_h_64(DisasContext *ctx, arg_zext_h_64 *a) +{ + REQUIRE_64BIT(ctx); + REQUIRE_ZBB(ctx); + return gen_unary(ctx, a, EXT_NONE, tcg_gen_ext16u_tl); +} + +static bool trans_clzw(DisasContext *ctx, arg_clzw *a) +{ + REQUIRE_64BIT(ctx); + REQUIRE_ZBB(ctx); + return gen_unary(ctx, a, EXT_NONE, gen_clzw); +} + +static bool trans_ctzw(DisasContext *ctx, arg_ctzw *a) +{ + REQUIRE_64BIT(ctx); + REQUIRE_ZBB(ctx); + return gen_unary(ctx, a, EXT_ZERO, gen_ctzw); +} + +static bool trans_cpopw(DisasContext *ctx, arg_cpopw *a) +{ + REQUIRE_64BIT(ctx); + REQUIRE_ZBB(ctx); + ctx->ol = MXL_RV32; + return gen_unary(ctx, a, EXT_ZERO, tcg_gen_ctpop_tl); +} + +static bool trans_rorw(DisasContext *ctx, arg_rorw *a) +{ + REQUIRE_64BIT(ctx); + REQUIRE_ZBB(ctx); + ctx->ol = MXL_RV32; + return gen_shift(ctx, a, EXT_NONE, gen_rorw); +} + +static bool trans_roriw(DisasContext *ctx, arg_roriw *a) +{ + REQUIRE_64BIT(ctx); + REQUIRE_ZBB(ctx); + ctx->ol = MXL_RV32; + return gen_shift_imm_fn(ctx, a, EXT_NONE, gen_roriw); +} + static bool trans_rolw(DisasContext *ctx, arg_rolw *a) { REQUIRE_64BIT(ctx); - REQUIRE_EXT(ctx, RVB); - ctx->w = true; + REQUIRE_ZBB(ctx); + ctx->ol = MXL_RV32; return gen_shift(ctx, a, EXT_NONE, gen_rolw); } -static bool trans_grevw(DisasContext *ctx, arg_grevw *a) -{ - REQUIRE_64BIT(ctx); - REQUIRE_EXT(ctx, RVB); - ctx->w = true; - return gen_shift(ctx, a, EXT_ZERO, gen_helper_grev); -} - -static bool trans_greviw(DisasContext *ctx, arg_greviw *a) -{ - REQUIRE_64BIT(ctx); - REQUIRE_EXT(ctx, RVB); - ctx->w = true; - return gen_shift_imm_tl(ctx, a, EXT_ZERO, gen_helper_grev); -} - -static bool trans_gorcw(DisasContext *ctx, arg_gorcw *a) -{ - REQUIRE_64BIT(ctx); - REQUIRE_EXT(ctx, RVB); - ctx->w = true; - return gen_shift(ctx, a, EXT_ZERO, gen_helper_gorc); -} - -static bool trans_gorciw(DisasContext *ctx, arg_gorciw *a) -{ - REQUIRE_64BIT(ctx); - REQUIRE_EXT(ctx, RVB); - ctx->w = true; - return gen_shift_imm_tl(ctx, a, EXT_ZERO, gen_helper_gorc); -} - #define GEN_SHADD_UW(SHAMT) \ static void gen_sh##SHAMT##add_uw(TCGv ret, TCGv arg1, TCGv arg2) \ { \ @@ -614,7 +446,7 @@ static bool trans_sh##SHAMT##add_uw(DisasContext *ctx, \ arg_sh##SHAMT##add_uw *a) \ { \ REQUIRE_64BIT(ctx); \ - REQUIRE_EXT(ctx, RVB); \ + REQUIRE_ZBA(ctx); \ return gen_arith(ctx, a, EXT_NONE, gen_sh##SHAMT##add_uw); \ } @@ -624,14 
+456,16 @@ GEN_TRANS_SHADD_UW(3) static void gen_add_uw(TCGv ret, TCGv arg1, TCGv arg2) { - tcg_gen_ext32u_tl(arg1, arg1); - tcg_gen_add_tl(ret, arg1, arg2); + TCGv t = tcg_temp_new(); + tcg_gen_ext32u_tl(t, arg1); + tcg_gen_add_tl(ret, t, arg2); + tcg_temp_free(t); } static bool trans_add_uw(DisasContext *ctx, arg_add_uw *a) { REQUIRE_64BIT(ctx); - REQUIRE_EXT(ctx, RVB); + REQUIRE_ZBA(ctx); return gen_arith(ctx, a, EXT_NONE, gen_add_uw); } @@ -643,6 +477,30 @@ static void gen_slli_uw(TCGv dest, TCGv src, target_long shamt) static bool trans_slli_uw(DisasContext *ctx, arg_slli_uw *a) { REQUIRE_64BIT(ctx); - REQUIRE_EXT(ctx, RVB); + REQUIRE_ZBA(ctx); return gen_shift_imm_fn(ctx, a, EXT_NONE, gen_slli_uw); } + +static bool trans_clmul(DisasContext *ctx, arg_clmul *a) +{ + REQUIRE_ZBC(ctx); + return gen_arith(ctx, a, EXT_NONE, gen_helper_clmul); +} + +static void gen_clmulh(TCGv dst, TCGv src1, TCGv src2) +{ + gen_helper_clmulr(dst, src1, src2); + tcg_gen_shri_tl(dst, dst, 1); +} + +static bool trans_clmulh(DisasContext *ctx, arg_clmulr *a) +{ + REQUIRE_ZBC(ctx); + return gen_arith(ctx, a, EXT_NONE, gen_clmulh); +} + +static bool trans_clmulr(DisasContext *ctx, arg_clmulh *a) +{ + REQUIRE_ZBC(ctx); + return gen_arith(ctx, a, EXT_NONE, gen_helper_clmulr); +} diff --git a/target/riscv/insn_trans/trans_rvd.c.inc b/target/riscv/insn_trans/trans_rvd.c.inc index db9ae15755..64fb0046f7 100644 --- a/target/riscv/insn_trans/trans_rvd.c.inc +++ b/target/riscv/insn_trans/trans_rvd.c.inc @@ -31,6 +31,7 @@ static bool trans_fld(DisasContext *ctx, arg_fld *a) tcg_gen_addi_tl(temp, addr, a->imm); addr = temp; } + addr = gen_pm_adjust_address(ctx, addr); tcg_gen_qemu_ld_i64(cpu_fpr[a->rd], addr, ctx->mem_idx, MO_TEQ); @@ -51,6 +52,7 @@ static bool trans_fsd(DisasContext *ctx, arg_fsd *a) tcg_gen_addi_tl(temp, addr, a->imm); addr = temp; } + addr = gen_pm_adjust_address(ctx, addr); tcg_gen_qemu_st_i64(cpu_fpr[a->rs2], addr, ctx->mem_idx, MO_TEQ); diff --git a/target/riscv/insn_trans/trans_rvf.c.inc b/target/riscv/insn_trans/trans_rvf.c.inc index bddbd418d9..b5459249c4 100644 --- a/target/riscv/insn_trans/trans_rvf.c.inc +++ b/target/riscv/insn_trans/trans_rvf.c.inc @@ -37,6 +37,7 @@ static bool trans_flw(DisasContext *ctx, arg_flw *a) tcg_gen_addi_tl(temp, addr, a->imm); addr = temp; } + addr = gen_pm_adjust_address(ctx, addr); dest = cpu_fpr[a->rd]; tcg_gen_qemu_ld_i64(dest, addr, ctx->mem_idx, MO_TEUL); @@ -59,6 +60,7 @@ static bool trans_fsw(DisasContext *ctx, arg_fsw *a) tcg_gen_addi_tl(temp, addr, a->imm); addr = temp; } + addr = gen_pm_adjust_address(ctx, addr); tcg_gen_qemu_st_i64(cpu_fpr[a->rs2], addr, ctx->mem_idx, MO_TEUL); diff --git a/target/riscv/insn_trans/trans_rvi.c.inc b/target/riscv/insn_trans/trans_rvi.c.inc index 920ae0edb3..e51dbc41c5 100644 --- a/target/riscv/insn_trans/trans_rvi.c.inc +++ b/target/riscv/insn_trans/trans_rvi.c.inc @@ -71,9 +71,7 @@ static bool trans_jalr(DisasContext *ctx, arg_jalr *a) if (a->rd != 0) { tcg_gen_movi_tl(cpu_gpr[a->rd], ctx->pc_succ_insn); } - - /* No chaining with JALR. 
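/*
 * Illustrative sketch (not part of the patch): reference models of the Zbc
 * carry-less multiply family for XLEN = 64, following the spec pseudocode.
 * They show why gen_clmulh() above can be implemented as the clmulr helper
 * followed by a logical shift right by one.  Names are editorial.
 */
#include <stdint.h>

static uint64_t clmul64_model(uint64_t a, uint64_t b)      /* low half */
{
    uint64_t r = 0;
    for (int i = 0; i < 64; i++) {
        if ((b >> i) & 1) {
            r ^= a << i;
        }
    }
    return r;
}

static uint64_t clmulr64_model(uint64_t a, uint64_t b)     /* bit-reversed */
{
    uint64_t r = 0;
    for (int i = 0; i < 64; i++) {
        if ((b >> i) & 1) {
            r ^= a >> (63 - i);
        }
    }
    return r;
}

static uint64_t clmulh64_model(uint64_t a, uint64_t b)     /* high half */
{
    /* Equivalent to the spec loop "r ^= a >> (64 - i)" over i = 1..63. */
    return clmulr64_model(a, b) >> 1;
}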
*/ - lookup_and_goto_ptr(ctx); + tcg_gen_lookup_and_goto_ptr(); if (misaligned) { gen_set_label(misaligned); @@ -146,6 +144,7 @@ static bool gen_load(DisasContext *ctx, arg_lb *a, MemOp memop) tcg_gen_addi_tl(temp, addr, a->imm); addr = temp; } + addr = gen_pm_adjust_address(ctx, addr); tcg_gen_qemu_ld_tl(dest, addr, ctx->mem_idx, memop); gen_set_gpr(ctx, a->rd, dest); @@ -187,6 +186,7 @@ static bool gen_store(DisasContext *ctx, arg_sb *a, MemOp memop) tcg_gen_addi_tl(temp, addr, a->imm); addr = temp; } + addr = gen_pm_adjust_address(ctx, addr); tcg_gen_qemu_st_tl(data, addr, ctx->mem_idx, memop); return true; @@ -270,14 +270,26 @@ static bool trans_slli(DisasContext *ctx, arg_slli *a) return gen_shift_imm_fn(ctx, a, EXT_NONE, tcg_gen_shli_tl); } +static void gen_srliw(TCGv dst, TCGv src, target_long shamt) +{ + tcg_gen_extract_tl(dst, src, shamt, 32 - shamt); +} + static bool trans_srli(DisasContext *ctx, arg_srli *a) { - return gen_shift_imm_fn(ctx, a, EXT_ZERO, tcg_gen_shri_tl); + return gen_shift_imm_fn_per_ol(ctx, a, EXT_NONE, + tcg_gen_shri_tl, gen_srliw); +} + +static void gen_sraiw(TCGv dst, TCGv src, target_long shamt) +{ + tcg_gen_sextract_tl(dst, src, shamt, 32 - shamt); } static bool trans_srai(DisasContext *ctx, arg_srai *a) { - return gen_shift_imm_fn(ctx, a, EXT_SIGN, tcg_gen_sari_tl); + return gen_shift_imm_fn_per_ol(ctx, a, EXT_NONE, + tcg_gen_sari_tl, gen_sraiw); } static bool trans_add(DisasContext *ctx, arg_add *a) @@ -333,73 +345,63 @@ static bool trans_and(DisasContext *ctx, arg_and *a) static bool trans_addiw(DisasContext *ctx, arg_addiw *a) { REQUIRE_64BIT(ctx); - ctx->w = true; + ctx->ol = MXL_RV32; return gen_arith_imm_fn(ctx, a, EXT_NONE, tcg_gen_addi_tl); } static bool trans_slliw(DisasContext *ctx, arg_slliw *a) { REQUIRE_64BIT(ctx); - ctx->w = true; + ctx->ol = MXL_RV32; return gen_shift_imm_fn(ctx, a, EXT_NONE, tcg_gen_shli_tl); } -static void gen_srliw(TCGv dst, TCGv src, target_long shamt) -{ - tcg_gen_extract_tl(dst, src, shamt, 32 - shamt); -} - static bool trans_srliw(DisasContext *ctx, arg_srliw *a) { REQUIRE_64BIT(ctx); - ctx->w = true; + ctx->ol = MXL_RV32; return gen_shift_imm_fn(ctx, a, EXT_NONE, gen_srliw); } -static void gen_sraiw(TCGv dst, TCGv src, target_long shamt) -{ - tcg_gen_sextract_tl(dst, src, shamt, 32 - shamt); -} - static bool trans_sraiw(DisasContext *ctx, arg_sraiw *a) { REQUIRE_64BIT(ctx); - ctx->w = true; + ctx->ol = MXL_RV32; return gen_shift_imm_fn(ctx, a, EXT_NONE, gen_sraiw); } static bool trans_addw(DisasContext *ctx, arg_addw *a) { REQUIRE_64BIT(ctx); - ctx->w = true; + ctx->ol = MXL_RV32; return gen_arith(ctx, a, EXT_NONE, tcg_gen_add_tl); } static bool trans_subw(DisasContext *ctx, arg_subw *a) { REQUIRE_64BIT(ctx); - ctx->w = true; + ctx->ol = MXL_RV32; return gen_arith(ctx, a, EXT_NONE, tcg_gen_sub_tl); } static bool trans_sllw(DisasContext *ctx, arg_sllw *a) { REQUIRE_64BIT(ctx); - ctx->w = true; + ctx->ol = MXL_RV32; return gen_shift(ctx, a, EXT_NONE, tcg_gen_shl_tl); } static bool trans_srlw(DisasContext *ctx, arg_srlw *a) { REQUIRE_64BIT(ctx); - ctx->w = true; + ctx->ol = MXL_RV32; return gen_shift(ctx, a, EXT_ZERO, tcg_gen_shr_tl); } static bool trans_sraw(DisasContext *ctx, arg_sraw *a) { REQUIRE_64BIT(ctx); - ctx->w = true; + ctx->ol = MXL_RV32; return gen_shift(ctx, a, EXT_SIGN, tcg_gen_sar_tl); } @@ -421,7 +423,7 @@ static bool trans_fence_i(DisasContext *ctx, arg_fence_i *a) * however we need to end the translation block */ tcg_gen_movi_tl(cpu_pc, ctx->pc_succ_insn); - exit_tb(ctx); + tcg_gen_exit_tb(NULL, 0); 
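/*
 * Illustrative sketch (not part of the patch): host-side models of the
 * SRLIW/SRAIW semantics implemented above with extract/sextract.  The
 * shift operates on the low 32 bits and, like every W-form result, is
 * sign-extended to the full register width.  Names are editorial and the
 * return value is the raw 64-bit register pattern.
 */
#include <stdint.h>

static uint64_t srliw_model(uint64_t rs1, unsigned shamt)   /* shamt < 32 */
{
    uint32_t r = (uint32_t)rs1 >> shamt;
    return (r & 0x80000000u) ? (0xffffffff00000000ULL | r) : (uint64_t)r;
}

static uint64_t sraiw_model(uint64_t rs1, unsigned shamt)   /* shamt < 32 */
{
    uint32_t v = (uint32_t)rs1;
    uint32_t r = v >> shamt;

    if (shamt && (v & 0x80000000u)) {
        r |= 0xffffffffu << (32 - shamt);   /* replicate the old sign bit */
    }
    return (r & 0x80000000u) ? (0xffffffff00000000ULL | r) : (uint64_t)r;
}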
ctx->base.is_jmp = DISAS_NORETURN; return true; } @@ -430,7 +432,7 @@ static bool do_csr_post(DisasContext *ctx) { /* We may have changed important cpu state -- exit to main loop. */ tcg_gen_movi_tl(cpu_pc, ctx->pc_succ_insn); - exit_tb(ctx); + tcg_gen_exit_tb(NULL, 0); ctx->base.is_jmp = DISAS_NORETURN; return true; } diff --git a/target/riscv/insn_trans/trans_rvm.c.inc b/target/riscv/insn_trans/trans_rvm.c.inc index b89a85ad3a..2af0e5c139 100644 --- a/target/riscv/insn_trans/trans_rvm.c.inc +++ b/target/riscv/insn_trans/trans_rvm.c.inc @@ -33,10 +33,16 @@ static void gen_mulh(TCGv ret, TCGv s1, TCGv s2) tcg_temp_free(discard); } +static void gen_mulh_w(TCGv ret, TCGv s1, TCGv s2) +{ + tcg_gen_mul_tl(ret, s1, s2); + tcg_gen_sari_tl(ret, ret, 32); +} + static bool trans_mulh(DisasContext *ctx, arg_mulh *a) { REQUIRE_EXT(ctx, RVM); - return gen_arith(ctx, a, EXT_NONE, gen_mulh); + return gen_arith_per_ol(ctx, a, EXT_SIGN, gen_mulh, gen_mulh_w); } static void gen_mulhsu(TCGv ret, TCGv arg1, TCGv arg2) @@ -54,10 +60,23 @@ static void gen_mulhsu(TCGv ret, TCGv arg1, TCGv arg2) tcg_temp_free(rh); } +static void gen_mulhsu_w(TCGv ret, TCGv arg1, TCGv arg2) +{ + TCGv t1 = tcg_temp_new(); + TCGv t2 = tcg_temp_new(); + + tcg_gen_ext32s_tl(t1, arg1); + tcg_gen_ext32u_tl(t2, arg2); + tcg_gen_mul_tl(ret, t1, t2); + tcg_temp_free(t1); + tcg_temp_free(t2); + tcg_gen_sari_tl(ret, ret, 32); +} + static bool trans_mulhsu(DisasContext *ctx, arg_mulhsu *a) { REQUIRE_EXT(ctx, RVM); - return gen_arith(ctx, a, EXT_NONE, gen_mulhsu); + return gen_arith_per_ol(ctx, a, EXT_NONE, gen_mulhsu, gen_mulhsu_w); } static void gen_mulhu(TCGv ret, TCGv s1, TCGv s2) @@ -71,7 +90,8 @@ static void gen_mulhu(TCGv ret, TCGv s1, TCGv s2) static bool trans_mulhu(DisasContext *ctx, arg_mulhu *a) { REQUIRE_EXT(ctx, RVM); - return gen_arith(ctx, a, EXT_NONE, gen_mulhu); + /* gen_mulh_w works for either sign as input. 
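/*
 * Illustrative sketch (not part of the patch): host-side models of the
 * 32-bit "high half of the product" helpers added for the RV32 operation
 * length.  Only the low 32 bits of each result are architecturally
 * significant; gen_set_gpr() sign-extends them afterwards, which is also
 * why the signed helper can be reused for MULHU once both inputs have been
 * zero-extended.  Names are editorial.
 */
#include <stdint.h>

static uint32_t mulh_w_model(int32_t a, int32_t b)
{
    int64_t prod = (int64_t)a * (int64_t)b;        /* fits in 64 bits */
    return (uint32_t)((uint64_t)prod >> 32);       /* bits 63..32 */
}

static uint32_t mulhsu_w_model(int32_t a, uint32_t b)
{
    int64_t prod = (int64_t)a * (int64_t)b;        /* (int64_t)b zero-extends */
    return (uint32_t)((uint64_t)prod >> 32);
}

static uint32_t mulhu_w_model(uint32_t a, uint32_t b)
{
    return (uint32_t)(((uint64_t)a * b) >> 32);    /* same low 32 bits */
}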
*/ + return gen_arith_per_ol(ctx, a, EXT_ZERO, gen_mulhu, gen_mulh_w); } static void gen_div(TCGv ret, TCGv source1, TCGv source2) @@ -214,7 +234,7 @@ static bool trans_mulw(DisasContext *ctx, arg_mulw *a) { REQUIRE_64BIT(ctx); REQUIRE_EXT(ctx, RVM); - ctx->w = true; + ctx->ol = MXL_RV32; return gen_arith(ctx, a, EXT_NONE, tcg_gen_mul_tl); } @@ -222,7 +242,7 @@ static bool trans_divw(DisasContext *ctx, arg_divw *a) { REQUIRE_64BIT(ctx); REQUIRE_EXT(ctx, RVM); - ctx->w = true; + ctx->ol = MXL_RV32; return gen_arith(ctx, a, EXT_SIGN, gen_div); } @@ -230,7 +250,7 @@ static bool trans_divuw(DisasContext *ctx, arg_divuw *a) { REQUIRE_64BIT(ctx); REQUIRE_EXT(ctx, RVM); - ctx->w = true; + ctx->ol = MXL_RV32; return gen_arith(ctx, a, EXT_ZERO, gen_divu); } @@ -238,7 +258,7 @@ static bool trans_remw(DisasContext *ctx, arg_remw *a) { REQUIRE_64BIT(ctx); REQUIRE_EXT(ctx, RVM); - ctx->w = true; + ctx->ol = MXL_RV32; return gen_arith(ctx, a, EXT_SIGN, gen_rem); } @@ -246,6 +266,6 @@ static bool trans_remuw(DisasContext *ctx, arg_remuw *a) { REQUIRE_64BIT(ctx); REQUIRE_EXT(ctx, RVM); - ctx->w = true; + ctx->ol = MXL_RV32; return gen_arith(ctx, a, EXT_ZERO, gen_remu); } diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc index fa451938f1..17ee3babef 100644 --- a/target/riscv/insn_trans/trans_rvv.c.inc +++ b/target/riscv/insn_trans/trans_rvv.c.inc @@ -41,7 +41,7 @@ static bool trans_vsetvl(DisasContext *ctx, arg_vsetvl *a) gen_set_gpr(ctx, a->rd, dst); tcg_gen_movi_tl(cpu_pc, ctx->pc_succ_insn); - lookup_and_goto_ptr(ctx); + tcg_gen_lookup_and_goto_ptr(); ctx->base.is_jmp = DISAS_NORETURN; return true; } @@ -704,18 +704,20 @@ static bool amo_op(DisasContext *s, arg_rwdvm *a, uint8_t seq) gen_helper_exit_atomic(cpu_env); s->base.is_jmp = DISAS_NORETURN; return true; - } else { - if (s->sew == 3) { - if (!is_32bit(s)) { - fn = fnsd[seq]; - } else { - /* Check done in amo_check(). */ - g_assert_not_reached(); - } - } else { - assert(seq < ARRAY_SIZE(fnsw)); - fn = fnsw[seq]; - } + } + + switch (s->sew) { + case 0 ... 2: + assert(seq < ARRAY_SIZE(fnsw)); + fn = fnsw[seq]; + break; + case 3: + /* XLEN check done in amo_check(). 
*/ + assert(seq < ARRAY_SIZE(fnsd)); + fn = fnsd[seq]; + break; + default: + g_assert_not_reached(); } data = FIELD_DP32(data, VDATA, MLEN, s->mlen); @@ -743,7 +745,8 @@ static bool amo_check(DisasContext *s, arg_rwdvm* a) static bool amo_check64(DisasContext *s, arg_rwdvm* a) { - return !is_32bit(s) && amo_check(s, a); + REQUIRE_64BIT(s); + return amo_check(s, a); } GEN_VEXT_TRANS(vamoswapw_v, 0, rwdvm, amo_op, amo_check) @@ -1619,7 +1622,8 @@ static bool trans_vmv_v_v(DisasContext *s, arg_vmv_v_v *a) tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); tcg_gen_gvec_2_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, a->rs1), - cpu_env, 0, s->vlen / 8, data, fns[s->sew]); + cpu_env, s->vlen / 8, s->vlen / 8, data, + fns[s->sew]); gen_set_label(over); } return true; diff --git a/target/riscv/machine.c b/target/riscv/machine.c index 16a08302da..7b4c739564 100644 --- a/target/riscv/machine.c +++ b/target/riscv/machine.c @@ -84,6 +84,14 @@ static bool vector_needed(void *opaque) return riscv_has_ext(env, RVV); } +static bool pointermasking_needed(void *opaque) +{ + RISCVCPU *cpu = opaque; + CPURISCVState *env = &cpu->env; + + return riscv_has_ext(env, RVJ); +} + static const VMStateDescription vmstate_vector = { .name = "cpu/vector", .version_id = 1, @@ -100,6 +108,24 @@ static const VMStateDescription vmstate_vector = { } }; +static const VMStateDescription vmstate_pointermasking = { + .name = "cpu/pointer_masking", + .version_id = 1, + .minimum_version_id = 1, + .needed = pointermasking_needed, + .fields = (VMStateField[]) { + VMSTATE_UINTTL(env.mmte, RISCVCPU), + VMSTATE_UINTTL(env.mpmmask, RISCVCPU), + VMSTATE_UINTTL(env.mpmbase, RISCVCPU), + VMSTATE_UINTTL(env.spmmask, RISCVCPU), + VMSTATE_UINTTL(env.spmbase, RISCVCPU), + VMSTATE_UINTTL(env.upmmask, RISCVCPU), + VMSTATE_UINTTL(env.upmbase, RISCVCPU), + + VMSTATE_END_OF_LIST() + } +}; + static const VMStateDescription vmstate_hyper = { .name = "cpu/hyper", .version_id = 1, @@ -140,8 +166,8 @@ static const VMStateDescription vmstate_hyper = { const VMStateDescription vmstate_riscv_cpu = { .name = "cpu", - .version_id = 2, - .minimum_version_id = 2, + .version_id = 3, + .minimum_version_id = 3, .fields = (VMStateField[]) { VMSTATE_UINTTL_ARRAY(env.gpr, RISCVCPU, 32), VMSTATE_UINT64_ARRAY(env.fpr, RISCVCPU, 32), @@ -153,8 +179,10 @@ const VMStateDescription vmstate_riscv_cpu = { VMSTATE_UINTTL(env.guest_phys_fault_addr, RISCVCPU), VMSTATE_UINTTL(env.priv_ver, RISCVCPU), VMSTATE_UINTTL(env.vext_ver, RISCVCPU), - VMSTATE_UINTTL(env.misa, RISCVCPU), - VMSTATE_UINTTL(env.misa_mask, RISCVCPU), + VMSTATE_UINT32(env.misa_mxl, RISCVCPU), + VMSTATE_UINT32(env.misa_ext, RISCVCPU), + VMSTATE_UINT32(env.misa_mxl_max, RISCVCPU), + VMSTATE_UINT32(env.misa_ext_mask, RISCVCPU), VMSTATE_UINT32(env.features, RISCVCPU), VMSTATE_UINTTL(env.priv, RISCVCPU), VMSTATE_UINTTL(env.virt, RISCVCPU), @@ -189,6 +217,7 @@ const VMStateDescription vmstate_riscv_cpu = { &vmstate_pmp, &vmstate_hyper, &vmstate_vector, + &vmstate_pointermasking, NULL } }; diff --git a/target/riscv/monitor.c b/target/riscv/monitor.c index f7e6ea72b3..7efb4b62c1 100644 --- a/target/riscv/monitor.c +++ b/target/riscv/monitor.c @@ -150,7 +150,7 @@ static void mem_info_svxx(Monitor *mon, CPUArchState *env) target_ulong last_size; int last_attr; - if (riscv_cpu_is_32bit(env)) { + if (riscv_cpu_mxl(env) == MXL_RV32) { base = (hwaddr)get_field(env->satp, SATP32_PPN) << PGSHIFT; vm = get_field(env->satp, SATP32_MODE); } else { @@ -220,7 +220,7 @@ void hmp_info_mem(Monitor *mon, const QDict *qdict) return; } - if 
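/*
 * Illustrative sketch (not part of the patch): how a legacy packed RV64
 * misa value decomposes into the two fields that the machine state above
 * now migrates separately (misa_mxl and misa_ext).  Per the privileged
 * spec, MXL sits in the top two bits of misa and the extension letters
 * A..Z in bits 25..0.  The helper name and the RV64 layout assumption are
 * editorial.
 */
#include <stdint.h>

enum { MODEL_MXL_RV32 = 1, MODEL_MXL_RV64 = 2, MODEL_MXL_RV128 = 3 };

static void split_misa_rv64_model(uint64_t misa, uint32_t *mxl, uint32_t *ext)
{
    *mxl = (uint32_t)(misa >> 62) & 0x3;       /* 1, 2 or 3 */
    *ext = (uint32_t)(misa & 0x3ffffff);       /* bit 0 = 'A', bit 25 = 'Z' */
}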
(riscv_cpu_is_32bit(env)) { + if (riscv_cpu_mxl(env) == MXL_RV32) { if (!(env->satp & SATP32_MODE)) { monitor_printf(mon, "No translation or protection\n"); return; diff --git a/target/riscv/translate.c b/target/riscv/translate.c index 74b33fa3c9..1d57bc97b5 100644 --- a/target/riscv/translate.c +++ b/target/riscv/translate.c @@ -36,6 +36,9 @@ static TCGv cpu_gpr[32], cpu_pc, cpu_vl; static TCGv_i64 cpu_fpr[32]; /* assume F and D extensions */ static TCGv load_res; static TCGv load_val; +/* globals for PM CSRs */ +static TCGv pm_mask[4]; +static TCGv pm_base[4]; #include "exec/gen-icount.h" @@ -55,9 +58,11 @@ typedef struct DisasContext { /* pc_succ_insn points to the instruction following base.pc_next */ target_ulong pc_succ_insn; target_ulong priv_ver; - target_ulong misa; + RISCVMXL xl; + uint32_t misa_ext; uint32_t opcode; uint32_t mstatus_fs; + uint32_t mstatus_hs_fs; uint32_t mem_idx; /* Remember the rounding mode encoded in the previous fp instruction, which we have already installed into env->fp_status. Or -1 for @@ -65,7 +70,7 @@ typedef struct DisasContext { to any system register, which includes CSR_FRM, so we do not have to reset this known value. */ int frm; - bool w; + RISCVMXL ol; bool virt_enabled; bool ext_ifencei; bool hlsx; @@ -81,30 +86,42 @@ typedef struct DisasContext { TCGv zero; /* Space for 3 operands plus 1 extra for address computation. */ TCGv temp[4]; + /* PointerMasking extension */ + bool pm_enabled; + TCGv pm_mask; + TCGv pm_base; } DisasContext; static inline bool has_ext(DisasContext *ctx, uint32_t ext) { - return ctx->misa & ext; + return ctx->misa_ext & ext; } #ifdef TARGET_RISCV32 -# define is_32bit(ctx) true +#define get_xl(ctx) MXL_RV32 #elif defined(CONFIG_USER_ONLY) -# define is_32bit(ctx) false +#define get_xl(ctx) MXL_RV64 #else -static inline bool is_32bit(DisasContext *ctx) -{ - return (ctx->misa & RV32) == RV32; -} +#define get_xl(ctx) ((ctx)->xl) #endif -/* The word size for this operation. */ -static inline int oper_len(DisasContext *ctx) +/* The word size for this machine mode. */ +static inline int __attribute__((unused)) get_xlen(DisasContext *ctx) { - return ctx->w ? 32 : TARGET_LONG_BITS; + return 16 << get_xl(ctx); } +/* The operation length, as opposed to the xlen. */ +#ifdef TARGET_RISCV32 +#define get_ol(ctx) MXL_RV32 +#else +#define get_ol(ctx) ((ctx)->ol) +#endif + +static inline int get_olen(DisasContext *ctx) +{ + return 16 << get_ol(ctx); +} /* * RISC-V requires NaN-boxing of narrower width floating point values. 
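/*
 * Illustrative sketch (not part of the patch): the width computation behind
 * get_xlen()/get_olen() above.  Because the MXL encoding is 1 for RV32,
 * 2 for RV64 and 3 for RV128, "16 << mxl" maps the enum straight to the
 * width in bits.
 */
static int width_from_mxl_model(int mxl)
{
    return 16 << mxl;    /* 1 -> 32, 2 -> 64, 3 -> 128 */
}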
@@ -148,31 +165,6 @@ static void generate_exception_mtval(DisasContext *ctx, int excp) ctx->base.is_jmp = DISAS_NORETURN; } -static void gen_exception_debug(void) -{ - gen_helper_raise_exception(cpu_env, tcg_constant_i32(EXCP_DEBUG)); -} - -/* Wrapper around tcg_gen_exit_tb that handles single stepping */ -static void exit_tb(DisasContext *ctx) -{ - if (ctx->base.singlestep_enabled) { - gen_exception_debug(); - } else { - tcg_gen_exit_tb(NULL, 0); - } -} - -/* Wrapper around tcg_gen_lookup_and_goto_ptr that handles single stepping */ -static void lookup_and_goto_ptr(DisasContext *ctx) -{ - if (ctx->base.singlestep_enabled) { - gen_exception_debug(); - } else { - tcg_gen_lookup_and_goto_ptr(); - } -} - static void gen_exception_illegal(DisasContext *ctx) { generate_exception(ctx, RISCV_EXCP_ILLEGAL_INST); @@ -191,7 +183,7 @@ static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest) tcg_gen_exit_tb(ctx->base.tb, n); } else { tcg_gen_movi_tl(cpu_pc, dest); - lookup_and_goto_ptr(ctx); + tcg_gen_lookup_and_goto_ptr(); } } @@ -217,24 +209,34 @@ static TCGv get_gpr(DisasContext *ctx, int reg_num, DisasExtend ext) return ctx->zero; } - switch (ctx->w ? ext : EXT_NONE) { - case EXT_NONE: - return cpu_gpr[reg_num]; - case EXT_SIGN: - t = temp_new(ctx); - tcg_gen_ext32s_tl(t, cpu_gpr[reg_num]); - return t; - case EXT_ZERO: - t = temp_new(ctx); - tcg_gen_ext32u_tl(t, cpu_gpr[reg_num]); - return t; + switch (get_ol(ctx)) { + case MXL_RV32: + switch (ext) { + case EXT_NONE: + break; + case EXT_SIGN: + t = temp_new(ctx); + tcg_gen_ext32s_tl(t, cpu_gpr[reg_num]); + return t; + case EXT_ZERO: + t = temp_new(ctx); + tcg_gen_ext32u_tl(t, cpu_gpr[reg_num]); + return t; + default: + g_assert_not_reached(); + } + break; + case MXL_RV64: + break; + default: + g_assert_not_reached(); } - g_assert_not_reached(); + return cpu_gpr[reg_num]; } static TCGv dest_gpr(DisasContext *ctx, int reg_num) { - if (reg_num == 0 || ctx->w) { + if (reg_num == 0 || get_olen(ctx) < TARGET_LONG_BITS) { return temp_new(ctx); } return cpu_gpr[reg_num]; @@ -243,10 +245,15 @@ static TCGv dest_gpr(DisasContext *ctx, int reg_num) static void gen_set_gpr(DisasContext *ctx, int reg_num, TCGv t) { if (reg_num != 0) { - if (ctx->w) { + switch (get_ol(ctx)) { + case MXL_RV32: tcg_gen_ext32s_tl(cpu_gpr[reg_num], t); - } else { + break; + case MXL_RV64: tcg_gen_mov_tl(cpu_gpr[reg_num], t); + break; + default: + g_assert_not_reached(); } } } @@ -271,6 +278,23 @@ static void gen_jal(DisasContext *ctx, int rd, target_ulong imm) ctx->base.is_jmp = DISAS_NORETURN; } +/* + * Generates address adjustment for PointerMasking + */ +static TCGv gen_pm_adjust_address(DisasContext *s, TCGv src) +{ + TCGv temp; + if (!s->pm_enabled) { + /* Load unmodified address */ + return src; + } else { + temp = temp_new(s); + tcg_gen_andc_tl(temp, src, s->pm_mask); + tcg_gen_or_tl(temp, temp, s->pm_base); + return temp; + } +} + #ifndef CONFIG_USER_ONLY /* The states of mstatus_fs are: * 0 = disabled, 1 = initial, 2 = clean, 3 = dirty @@ -280,27 +304,28 @@ static void gen_jal(DisasContext *ctx, int rd, target_ulong imm) static void mark_fs_dirty(DisasContext *ctx) { TCGv tmp; - target_ulong sd; - if (ctx->mstatus_fs == MSTATUS_FS) { - return; + if (ctx->mstatus_fs != MSTATUS_FS) { + /* Remember the state change for the rest of the TB. 
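/*
 * Illustrative sketch (not part of the patch): the address transform that
 * gen_pm_adjust_address() above emits when pointer masking is enabled.
 * Bits selected by the active pm_mask are cleared and replaced by the
 * matching bits of pm_base; with a zero mask and base the address passes
 * through unchanged.  Names are editorial.
 */
#include <stdint.h>

static uint64_t pm_adjust_model(uint64_t addr, uint64_t pm_mask, uint64_t pm_base)
{
    return (addr & ~pm_mask) | pm_base;    /* andc + or, as in the TCG ops */
}

/*
 * Example: with pm_mask = 0xffff000000000000 and pm_base = 0, a 16-bit tag
 * in the top bits of a pointer is stripped before the memory access.
 */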
*/ + ctx->mstatus_fs = MSTATUS_FS; + + tmp = tcg_temp_new(); + tcg_gen_ld_tl(tmp, cpu_env, offsetof(CPURISCVState, mstatus)); + tcg_gen_ori_tl(tmp, tmp, MSTATUS_FS); + tcg_gen_st_tl(tmp, cpu_env, offsetof(CPURISCVState, mstatus)); + tcg_temp_free(tmp); } - /* Remember the state change for the rest of the TB. */ - ctx->mstatus_fs = MSTATUS_FS; - tmp = tcg_temp_new(); - sd = is_32bit(ctx) ? MSTATUS32_SD : MSTATUS64_SD; + if (ctx->virt_enabled && ctx->mstatus_hs_fs != MSTATUS_FS) { + /* Remember the stage change for the rest of the TB. */ + ctx->mstatus_hs_fs = MSTATUS_FS; - tcg_gen_ld_tl(tmp, cpu_env, offsetof(CPURISCVState, mstatus)); - tcg_gen_ori_tl(tmp, tmp, MSTATUS_FS | sd); - tcg_gen_st_tl(tmp, cpu_env, offsetof(CPURISCVState, mstatus)); - - if (ctx->virt_enabled) { + tmp = tcg_temp_new(); tcg_gen_ld_tl(tmp, cpu_env, offsetof(CPURISCVState, mstatus_hs)); - tcg_gen_ori_tl(tmp, tmp, MSTATUS_FS | sd); + tcg_gen_ori_tl(tmp, tmp, MSTATUS_FS); tcg_gen_st_tl(tmp, cpu_env, offsetof(CPURISCVState, mstatus_hs)); + tcg_temp_free(tmp); } - tcg_temp_free(tmp); } #else static inline void mark_fs_dirty(DisasContext *ctx) { } @@ -337,10 +362,16 @@ EX_SH(12) } \ } while (0) -#define REQUIRE_64BIT(ctx) do { \ - if (is_32bit(ctx)) { \ - return false; \ - } \ +#define REQUIRE_32BIT(ctx) do { \ + if (get_xl(ctx) != MXL_RV32) { \ + return false; \ + } \ +} while (0) + +#define REQUIRE_64BIT(ctx) do { \ + if (get_xl(ctx) < MXL_RV64) { \ + return false; \ + } \ } while (0) static int ex_rvc_register(DisasContext *ctx, int reg) @@ -395,11 +426,27 @@ static bool gen_arith(DisasContext *ctx, arg_r *a, DisasExtend ext, return true; } +static bool gen_arith_per_ol(DisasContext *ctx, arg_r *a, DisasExtend ext, + void (*f_tl)(TCGv, TCGv, TCGv), + void (*f_32)(TCGv, TCGv, TCGv)) +{ + int olen = get_olen(ctx); + + if (olen != TARGET_LONG_BITS) { + if (olen == 32) { + f_tl = f_32; + } else { + g_assert_not_reached(); + } + } + return gen_arith(ctx, a, ext, f_tl); +} + static bool gen_shift_imm_fn(DisasContext *ctx, arg_shift *a, DisasExtend ext, void (*func)(TCGv, TCGv, target_long)) { TCGv dest, src1; - int max_len = oper_len(ctx); + int max_len = get_olen(ctx); if (a->shamt >= max_len) { return false; @@ -414,11 +461,27 @@ static bool gen_shift_imm_fn(DisasContext *ctx, arg_shift *a, DisasExtend ext, return true; } +static bool gen_shift_imm_fn_per_ol(DisasContext *ctx, arg_shift *a, + DisasExtend ext, + void (*f_tl)(TCGv, TCGv, target_long), + void (*f_32)(TCGv, TCGv, target_long)) +{ + int olen = get_olen(ctx); + if (olen != TARGET_LONG_BITS) { + if (olen == 32) { + f_tl = f_32; + } else { + g_assert_not_reached(); + } + } + return gen_shift_imm_fn(ctx, a, ext, f_tl); +} + static bool gen_shift_imm_tl(DisasContext *ctx, arg_shift *a, DisasExtend ext, void (*func)(TCGv, TCGv, TCGv)) { TCGv dest, src1, src2; - int max_len = oper_len(ctx); + int max_len = get_olen(ctx); if (a->shamt >= max_len) { return false; @@ -442,7 +505,7 @@ static bool gen_shift(DisasContext *ctx, arg_r *a, DisasExtend ext, TCGv src2 = get_gpr(ctx, a->rs2, EXT_NONE); TCGv ext2 = tcg_temp_new(); - tcg_gen_andi_tl(ext2, src2, oper_len(ctx) - 1); + tcg_gen_andi_tl(ext2, src2, get_olen(ctx) - 1); func(dest, src1, ext2); gen_set_gpr(ctx, a->rd, dest); @@ -450,6 +513,21 @@ static bool gen_shift(DisasContext *ctx, arg_r *a, DisasExtend ext, return true; } +static bool gen_shift_per_ol(DisasContext *ctx, arg_r *a, DisasExtend ext, + void (*f_tl)(TCGv, TCGv, TCGv), + void (*f_32)(TCGv, TCGv, TCGv)) +{ + int olen = get_olen(ctx); + if (olen != 
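/*
 * Illustrative sketch (not part of the patch): the selection pattern shared
 * by the *_per_ol helpers above, reduced to plain C.  When the operation
 * length is narrower than the register width, the 32-bit variant of the
 * generator is substituted; any other narrow width would be a bug.  Types
 * and names are editorial.
 */
#include <assert.h>
#include <stdint.h>

typedef uint64_t (*binop_fn_model)(uint64_t, uint64_t);

static binop_fn_model select_per_olen_model(int olen, int xlen,
                                            binop_fn_model f_tl,
                                            binop_fn_model f_32)
{
    if (olen == xlen) {
        return f_tl;          /* full-width operation */
    }
    assert(olen == 32);       /* only the RV32 narrowing is supported */
    return f_32;
}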
TARGET_LONG_BITS) { + if (olen == 32) { + f_tl = f_32; + } else { + g_assert_not_reached(); + } + } + return gen_shift(ctx, a, ext, f_tl); +} + static bool gen_unary(DisasContext *ctx, arg_r2 *a, DisasExtend ext, void (*func)(TCGv, TCGv)) { @@ -462,6 +540,22 @@ static bool gen_unary(DisasContext *ctx, arg_r2 *a, DisasExtend ext, return true; } +static bool gen_unary_per_ol(DisasContext *ctx, arg_r2 *a, DisasExtend ext, + void (*f_tl)(TCGv, TCGv), + void (*f_32)(TCGv, TCGv)) +{ + int olen = get_olen(ctx); + + if (olen != TARGET_LONG_BITS) { + if (olen == 32) { + f_tl = f_32; + } else { + g_assert_not_reached(); + } + } + return gen_unary(ctx, a, ext, f_tl); +} + static uint32_t opcode_at(DisasContextBase *dcbase, target_ulong pc) { DisasContext *ctx = container_of(dcbase, DisasContext, base); @@ -517,7 +611,7 @@ static void riscv_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs) uint32_t tb_flags = ctx->base.tb->flags; ctx->pc_succ_insn = ctx->base.pc_first; - ctx->mem_idx = tb_flags & TB_FLAGS_MMU_MASK; + ctx->mem_idx = FIELD_EX32(tb_flags, TB_FLAGS, MEM_IDX); ctx->mstatus_fs = tb_flags & TB_FLAGS_MSTATUS_FS; ctx->priv_ver = env->priv_ver; #if !defined(CONFIG_USER_ONLY) @@ -529,20 +623,25 @@ static void riscv_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs) #else ctx->virt_enabled = false; #endif - ctx->misa = env->misa; + ctx->misa_ext = env->misa_ext; ctx->frm = -1; /* unknown rounding mode */ ctx->ext_ifencei = cpu->cfg.ext_ifencei; ctx->vlen = cpu->cfg.vlen; + ctx->mstatus_hs_fs = FIELD_EX32(tb_flags, TB_FLAGS, MSTATUS_HS_FS); ctx->hlsx = FIELD_EX32(tb_flags, TB_FLAGS, HLSX); ctx->vill = FIELD_EX32(tb_flags, TB_FLAGS, VILL); ctx->sew = FIELD_EX32(tb_flags, TB_FLAGS, SEW); ctx->lmul = FIELD_EX32(tb_flags, TB_FLAGS, LMUL); ctx->mlen = 1 << (ctx->sew + 3 - ctx->lmul); ctx->vl_eq_vlmax = FIELD_EX32(tb_flags, TB_FLAGS, VL_EQ_VLMAX); + ctx->xl = FIELD_EX32(tb_flags, TB_FLAGS, XL); ctx->cs = cs; - ctx->w = false; ctx->ntemp = 0; memset(ctx->temp, 0, sizeof(ctx->temp)); + ctx->pm_enabled = FIELD_EX32(tb_flags, TB_FLAGS, PM_ENABLED); + int priv = tb_flags & TB_FLAGS_PRIV_MMU_MASK; + ctx->pm_mask = pm_mask[priv]; + ctx->pm_base = pm_base[priv]; ctx->zero = tcg_constant_tl(0); } @@ -564,9 +663,9 @@ static void riscv_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu) CPURISCVState *env = cpu->env_ptr; uint16_t opcode16 = translator_lduw(env, &ctx->base, ctx->base.pc_next); + ctx->ol = ctx->xl; decode_opc(env, ctx, opcode16); ctx->base.pc_next = ctx->pc_succ_insn; - ctx->w = false; for (int i = ctx->ntemp - 1; i >= 0; --i) { tcg_temp_free(ctx->temp[i]); @@ -656,4 +755,19 @@ void riscv_translate_init(void) "load_res"); load_val = tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, load_val), "load_val"); +#ifndef CONFIG_USER_ONLY + /* Assign PM CSRs to tcg globals */ + pm_mask[PRV_U] = + tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, upmmask), "upmmask"); + pm_base[PRV_U] = + tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, upmbase), "upmbase"); + pm_mask[PRV_S] = + tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, spmmask), "spmmask"); + pm_base[PRV_S] = + tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, spmbase), "spmbase"); + pm_mask[PRV_M] = + tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, mpmmask), "mpmmask"); + pm_base[PRV_M] = + tcg_global_mem_new(cpu_env, offsetof(CPURISCVState, mpmbase), "mpmbase"); +#endif } diff --git a/target/rx/helper.h b/target/rx/helper.h index f0b7ebbbf7..ebb4739474 100644 --- a/target/rx/helper.h +++ 
b/target/rx/helper.h @@ -2,7 +2,6 @@ DEF_HELPER_1(raise_illegal_instruction, noreturn, env) DEF_HELPER_1(raise_access_fault, noreturn, env) DEF_HELPER_1(raise_privilege_violation, noreturn, env) DEF_HELPER_1(wait, noreturn, env) -DEF_HELPER_1(debug, noreturn, env) DEF_HELPER_2(rxint, noreturn, env, i32) DEF_HELPER_1(rxbrk, noreturn, env) DEF_HELPER_FLAGS_3(fadd, TCG_CALL_NO_WG, f32, env, f32, f32) diff --git a/target/rx/op_helper.c b/target/rx/op_helper.c index 4d315b4449..11f952d340 100644 --- a/target/rx/op_helper.c +++ b/target/rx/op_helper.c @@ -451,14 +451,6 @@ void QEMU_NORETURN helper_wait(CPURXState *env) raise_exception(env, EXCP_HLT, 0); } -void QEMU_NORETURN helper_debug(CPURXState *env) -{ - CPUState *cs = env_cpu(env); - - cs->exception_index = EXCP_DEBUG; - cpu_loop_exit(cs); -} - void QEMU_NORETURN helper_rxint(CPURXState *env, uint32_t vec) { raise_exception(env, 0x100 + vec, 0); diff --git a/target/rx/translate.c b/target/rx/translate.c index a3cf720455..5db8f79a82 100644 --- a/target/rx/translate.c +++ b/target/rx/translate.c @@ -150,11 +150,7 @@ static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest) tcg_gen_exit_tb(dc->base.tb, n); } else { tcg_gen_movi_i32(cpu_pc, dest); - if (dc->base.singlestep_enabled) { - gen_helper_debug(cpu_env); - } else { - tcg_gen_lookup_and_goto_ptr(); - } + tcg_gen_lookup_and_goto_ptr(); } dc->base.is_jmp = DISAS_NORETURN; } @@ -2331,11 +2327,7 @@ static void rx_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs) gen_goto_tb(ctx, 0, dcbase->pc_next); break; case DISAS_JUMP: - if (ctx->base.singlestep_enabled) { - gen_helper_debug(cpu_env); - } else { - tcg_gen_lookup_and_goto_ptr(); - } + tcg_gen_lookup_and_goto_ptr(); break; case DISAS_UPDATE: tcg_gen_movi_i32(cpu_pc, ctx->base.pc_next); diff --git a/target/s390x/cpu.c b/target/s390x/cpu.c index 7b7b05f1d3..ccdbaf84d5 100644 --- a/target/s390x/cpu.c +++ b/target/s390x/cpu.c @@ -266,9 +266,12 @@ static void s390_cpu_reset_full(DeviceState *dev) static const struct TCGCPUOps s390_tcg_ops = { .initialize = s390x_translate_init, - .tlb_fill = s390_cpu_tlb_fill, -#if !defined(CONFIG_USER_ONLY) +#ifdef CONFIG_USER_ONLY + .record_sigsegv = s390_cpu_record_sigsegv, + .record_sigbus = s390_cpu_record_sigbus, +#else + .tlb_fill = s390_cpu_tlb_fill, .cpu_exec_interrupt = s390_cpu_exec_interrupt, .do_interrupt = s390_cpu_do_interrupt, .debug_excp_handler = s390x_cpu_debug_excp_handler, diff --git a/target/s390x/cpu_models.c b/target/s390x/cpu_models.c index 4e4598cc77..11e06cc51f 100644 --- a/target/s390x/cpu_models.c +++ b/target/s390x/cpu_models.c @@ -398,14 +398,14 @@ void s390_cpu_list(void) for (feat = 0; feat < S390_FEAT_MAX; feat++) { const S390FeatDef *def = s390_feat_def(feat); - qemu_printf("%-20s %-50s\n", def->name, def->desc); + qemu_printf("%-20s %s\n", def->name, def->desc); } qemu_printf("\nRecognized feature groups:\n"); for (group = 0; group < S390_FEAT_GROUP_MAX; group++) { const S390FeatGroupDef *def = s390_feat_group_def(group); - qemu_printf("%-20s %-50s\n", def->name, def->desc); + qemu_printf("%-20s %s\n", def->name, def->desc); } } diff --git a/target/s390x/s390x-internal.h b/target/s390x/s390x-internal.h index 27d4a03ca1..1a178aed41 100644 --- a/target/s390x/s390x-internal.h +++ b/target/s390x/s390x-internal.h @@ -270,12 +270,21 @@ ObjectClass *s390_cpu_class_by_name(const char *name); void s390x_cpu_debug_excp_handler(CPUState *cs); void s390_cpu_do_interrupt(CPUState *cpu); bool s390_cpu_exec_interrupt(CPUState *cpu, int int_req); + +#ifdef CONFIG_USER_ONLY +void 
s390_cpu_record_sigsegv(CPUState *cs, vaddr address, + MMUAccessType access_type, + bool maperr, uintptr_t retaddr); +void s390_cpu_record_sigbus(CPUState *cs, vaddr address, + MMUAccessType access_type, uintptr_t retaddr); +#else bool s390_cpu_tlb_fill(CPUState *cs, vaddr address, int size, MMUAccessType access_type, int mmu_idx, bool probe, uintptr_t retaddr); void s390x_cpu_do_unaligned_access(CPUState *cs, vaddr addr, MMUAccessType access_type, int mmu_idx, uintptr_t retaddr) QEMU_NORETURN; +#endif /* fpu_helper.c */ diff --git a/target/s390x/sigp.c b/target/s390x/sigp.c index d57427ced8..51c727834c 100644 --- a/target/s390x/sigp.c +++ b/target/s390x/sigp.c @@ -428,26 +428,10 @@ static int handle_sigp_single_dst(S390CPU *cpu, S390CPU *dst_cpu, uint8_t order, static int sigp_set_architecture(S390CPU *cpu, uint32_t param, uint64_t *status_reg) { - CPUState *cur_cs; - S390CPU *cur_cpu; - bool all_stopped = true; - - CPU_FOREACH(cur_cs) { - cur_cpu = S390_CPU(cur_cs); - - if (cur_cpu == cpu) { - continue; - } - if (s390_cpu_get_state(cur_cpu) != S390_CPU_STATE_STOPPED) { - all_stopped = false; - } - } - *status_reg &= 0xffffffff00000000ULL; /* Reject set arch order, with czam we're always in z/Arch mode. */ - *status_reg |= (all_stopped ? SIGP_STAT_INVALID_PARAMETER : - SIGP_STAT_INCORRECT_STATE); + *status_reg |= SIGP_STAT_INVALID_PARAMETER; return SIGP_CC_STATUS_STORED; } diff --git a/target/s390x/tcg/excp_helper.c b/target/s390x/tcg/excp_helper.c index 3d6662a53c..4e7648f301 100644 --- a/target/s390x/tcg/excp_helper.c +++ b/target/s390x/tcg/excp_helper.c @@ -82,6 +82,19 @@ void HELPER(data_exception)(CPUS390XState *env, uint32_t dxc) tcg_s390_data_exception(env, dxc, GETPC()); } +/* + * Unaligned accesses are only diagnosed with MO_ALIGN. At the moment, + * this is only for the atomic operations, for which we want to raise a + * specification exception. + */ +static void QEMU_NORETURN do_unaligned_access(CPUState *cs, uintptr_t retaddr) +{ + S390CPU *cpu = S390_CPU(cs); + CPUS390XState *env = &cpu->env; + + tcg_s390_program_interrupt(env, PGM_SPECIFICATION, retaddr); +} + #if defined(CONFIG_USER_ONLY) void s390_cpu_do_interrupt(CPUState *cs) @@ -89,19 +102,29 @@ void s390_cpu_do_interrupt(CPUState *cs) cs->exception_index = -1; } -bool s390_cpu_tlb_fill(CPUState *cs, vaddr address, int size, - MMUAccessType access_type, int mmu_idx, - bool probe, uintptr_t retaddr) +void s390_cpu_record_sigsegv(CPUState *cs, vaddr address, + MMUAccessType access_type, + bool maperr, uintptr_t retaddr) { S390CPU *cpu = S390_CPU(cs); - trigger_pgm_exception(&cpu->env, PGM_ADDRESSING); - /* On real machines this value is dropped into LowMem. Since this - is userland, simply put this someplace that cpu_loop can find it. */ - cpu->env.__excp_addr = address; + trigger_pgm_exception(&cpu->env, maperr ? PGM_ADDRESSING : PGM_PROTECTION); + /* + * On real machines this value is dropped into LowMem. Since this + * is userland, simply put this someplace that cpu_loop can find it. + * S390 only gives the page of the fault, not the exact address. + * C.f. the construction of TEC in mmu_translate(). 
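/*
 * Illustrative sketch (not part of the patch): the two decisions made by
 * the new user-only SIGSEGV hook above, reduced to plain C.  An unmapped
 * page (maperr) is reported as an addressing exception, a mapped but
 * protected page as a protection exception, and only the page-aligned
 * address is recorded, mirroring how the hardware TEC reports faults.
 * The constants and names below are editorial, not the values from the
 * QEMU headers, and page_size is assumed to be a power of two.
 */
#include <stdbool.h>
#include <stdint.h>

enum { MODEL_EX_PROTECTION = 1, MODEL_EX_ADDRESSING = 2 };

static void record_sigsegv_model(bool maperr, uint64_t address,
                                 uint64_t page_size,
                                 int *excp, uint64_t *excp_addr)
{
    *excp = maperr ? MODEL_EX_ADDRESSING : MODEL_EX_PROTECTION;
    *excp_addr = address & ~(page_size - 1);    /* page of the fault only */
}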
+ */ + cpu->env.__excp_addr = address & TARGET_PAGE_MASK; cpu_loop_exit_restore(cs, retaddr); } +void s390_cpu_record_sigbus(CPUState *cs, vaddr address, + MMUAccessType access_type, uintptr_t retaddr) +{ + do_unaligned_access(cs, retaddr); +} + #else /* !CONFIG_USER_ONLY */ static inline uint64_t cpu_mmu_idx_to_asc(int mmu_idx) @@ -589,17 +612,11 @@ void s390x_cpu_debug_excp_handler(CPUState *cs) } } -/* Unaligned accesses are only diagnosed with MO_ALIGN. At the moment, - this is only for the atomic operations, for which we want to raise a - specification exception. */ void s390x_cpu_do_unaligned_access(CPUState *cs, vaddr addr, MMUAccessType access_type, int mmu_idx, uintptr_t retaddr) { - S390CPU *cpu = S390_CPU(cs); - CPUS390XState *env = &cpu->env; - - tcg_s390_program_interrupt(env, PGM_SPECIFICATION, retaddr); + do_unaligned_access(cs, retaddr); } static void QEMU_NORETURN monitor_event(CPUS390XState *env, diff --git a/target/s390x/tcg/mem_helper.c b/target/s390x/tcg/mem_helper.c index 0bf775a37d..362a30d99e 100644 --- a/target/s390x/tcg/mem_helper.c +++ b/target/s390x/tcg/mem_helper.c @@ -27,7 +27,6 @@ #include "exec/cpu_ldst.h" #include "qemu/int128.h" #include "qemu/atomic128.h" -#include "tcg/tcg.h" #include "trace.h" #if !defined(CONFIG_USER_ONLY) @@ -142,20 +141,12 @@ static int s390_probe_access(CPUArchState *env, target_ulong addr, int size, MMUAccessType access_type, int mmu_idx, bool nonfault, void **phost, uintptr_t ra) { +#if defined(CONFIG_USER_ONLY) + return probe_access_flags(env, addr, access_type, mmu_idx, + nonfault, phost, ra); +#else int flags; -#if defined(CONFIG_USER_ONLY) - flags = page_get_flags(addr); - if (!(flags & (access_type == MMU_DATA_LOAD ? PAGE_READ : PAGE_WRITE_ORG))) { - env->__excp_addr = addr; - flags = (flags & PAGE_VALID) ? PGM_PROTECTION : PGM_ADDRESSING; - if (nonfault) { - return flags; - } - tcg_s390_program_interrupt(env, flags, ra); - } - *phost = g2h(env_cpu(env), addr); -#else /* * For !CONFIG_USER_ONLY, we cannot rely on TLB_INVALID_MASK or haddr==NULL * to detect if there was an exception during tlb_fill(). @@ -174,8 +165,8 @@ static int s390_probe_access(CPUArchState *env, target_ulong addr, int size, (access_type == MMU_DATA_STORE ? BP_MEM_WRITE : BP_MEM_READ), ra); } -#endif return 0; +#endif } static int access_prepare_nf(S390Access *access, CPUS390XState *env, @@ -239,7 +230,7 @@ static void do_access_memset(CPUS390XState *env, vaddr vaddr, char *haddr, g_assert(haddr); memset(haddr, byte, size); #else - TCGMemOpIdx oi = make_memop_idx(MO_UB, mmu_idx); + MemOpIdx oi = make_memop_idx(MO_UB, mmu_idx); int i; if (likely(haddr)) { @@ -250,13 +241,13 @@ static void do_access_memset(CPUS390XState *env, vaddr vaddr, char *haddr, * page. This is especially relevant to speed up TLB_NOTDIRTY. 
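/*
 * Illustrative sketch (not part of the patch): what a MemOpIdx packs.  The
 * MemOp (size, sign, endianness, alignment) goes in the upper bits and the
 * MMU index in the low four, so a single integer can be handed to the
 * cpu_*_mmu accessors used below.  The layout shown follows the QEMU
 * definition at the time of this series; treat it as illustrative.
 */
#include <assert.h>
#include <stdint.h>

typedef uint32_t MemOpIdxModel;

static MemOpIdxModel make_memop_idx_model(unsigned op, unsigned mmu_idx)
{
    assert(mmu_idx <= 15);
    return (op << 4) | mmu_idx;
}

static unsigned get_memop_model(MemOpIdxModel oi)  { return oi >> 4; }
static unsigned get_mmuidx_model(MemOpIdxModel oi) { return oi & 15; }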
*/ g_assert(size > 0); - helper_ret_stb_mmu(env, vaddr, byte, oi, ra); + cpu_stb_mmu(env, vaddr, byte, oi, ra); haddr = tlb_vaddr_to_host(env, vaddr, MMU_DATA_STORE, mmu_idx); if (likely(haddr)) { memset(haddr + 1, byte, size - 1); } else { for (i = 1; i < size; i++) { - helper_ret_stb_mmu(env, vaddr + i, byte, oi, ra); + cpu_stb_mmu(env, vaddr + i, byte, oi, ra); } } } @@ -282,7 +273,7 @@ static uint8_t do_access_get_byte(CPUS390XState *env, vaddr vaddr, char **haddr, #ifdef CONFIG_USER_ONLY return ldub_p(*haddr + offset); #else - TCGMemOpIdx oi = make_memop_idx(MO_UB, mmu_idx); + MemOpIdx oi = make_memop_idx(MO_UB, mmu_idx); uint8_t byte; if (likely(*haddr)) { @@ -292,7 +283,7 @@ static uint8_t do_access_get_byte(CPUS390XState *env, vaddr vaddr, char **haddr, * Do a single access and test if we can then get access to the * page. This is especially relevant to speed up TLB_NOTDIRTY. */ - byte = helper_ret_ldub_mmu(env, vaddr + offset, oi, ra); + byte = cpu_ldb_mmu(env, vaddr + offset, oi, ra); *haddr = tlb_vaddr_to_host(env, vaddr, MMU_DATA_LOAD, mmu_idx); return byte; #endif @@ -316,7 +307,7 @@ static void do_access_set_byte(CPUS390XState *env, vaddr vaddr, char **haddr, #ifdef CONFIG_USER_ONLY stb_p(*haddr + offset, byte); #else - TCGMemOpIdx oi = make_memop_idx(MO_UB, mmu_idx); + MemOpIdx oi = make_memop_idx(MO_UB, mmu_idx); if (likely(*haddr)) { stb_p(*haddr + offset, byte); @@ -326,7 +317,7 @@ static void do_access_set_byte(CPUS390XState *env, vaddr vaddr, char **haddr, * Do a single access and test if we can then get access to the * page. This is especially relevant to speed up TLB_NOTDIRTY. */ - helper_ret_stb_mmu(env, vaddr + offset, byte, oi, ra); + cpu_stb_mmu(env, vaddr + offset, byte, oi, ra); *haddr = tlb_vaddr_to_host(env, vaddr, MMU_DATA_STORE, mmu_idx); #endif } @@ -1804,14 +1795,14 @@ void HELPER(cdsg_parallel)(CPUS390XState *env, uint64_t addr, Int128 cmpv = int128_make128(env->regs[r1 + 1], env->regs[r1]); Int128 newv = int128_make128(env->regs[r3 + 1], env->regs[r3]); int mem_idx; - TCGMemOpIdx oi; + MemOpIdx oi; Int128 oldv; bool fail; assert(HAVE_CMPXCHG128); mem_idx = cpu_mmu_index(env, false); - oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx); + oi = make_memop_idx(MO_TE | MO_128 | MO_ALIGN, mem_idx); oldv = cpu_atomic_cmpxchgo_be_mmu(env, addr, cmpv, newv, oi, ra); fail = !int128_eq(oldv, cmpv); @@ -1884,7 +1875,7 @@ static uint32_t do_csst(CPUS390XState *env, uint32_t r3, uint64_t a1, uint32_t *haddr = g2h(env_cpu(env), a1); ov = qatomic_cmpxchg__nocheck(haddr, cv, nv); #else - TCGMemOpIdx oi = make_memop_idx(MO_TEUL | MO_ALIGN, mem_idx); + MemOpIdx oi = make_memop_idx(MO_TEUL | MO_ALIGN, mem_idx); ov = cpu_atomic_cmpxchgl_be_mmu(env, a1, cv, nv, oi, ra); #endif } else { @@ -1904,7 +1895,7 @@ static uint32_t do_csst(CPUS390XState *env, uint32_t r3, uint64_t a1, if (parallel) { #ifdef CONFIG_ATOMIC64 - TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN, mem_idx); + MemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN, mem_idx); ov = cpu_atomic_cmpxchgq_be_mmu(env, a1, cv, nv, oi, ra); #else /* Note that we asserted !parallel above. 
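/*
 * Illustrative sketch (not part of the patch): the 16-byte compare-and-swap
 * that cpu_atomic_cmpxchgo_be_mmu() performs for CDSG above, modelled on
 * two 64-bit halves with no atomicity implied.  On a match the new value is
 * stored and the condition code is 0; otherwise the current memory contents
 * are returned to the caller and the condition code is 1.  Names are
 * editorial.
 */
#include <stdint.h>

typedef struct { uint64_t hi, lo; } u128_model;

static int cdsg_model(u128_model *mem, u128_model *cmp_and_old, u128_model swap)
{
    if (mem->hi == cmp_and_old->hi && mem->lo == cmp_and_old->lo) {
        *mem = swap;
        return 0;                 /* cc 0: swapped */
    }
    *cmp_and_old = *mem;          /* cc 1: caller sees the current value */
    return 1;
}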
*/ @@ -1940,7 +1931,7 @@ static uint32_t do_csst(CPUS390XState *env, uint32_t r3, uint64_t a1, cpu_stq_data_ra(env, a1 + 0, int128_gethi(nv), ra); cpu_stq_data_ra(env, a1 + 8, int128_getlo(nv), ra); } else if (HAVE_CMPXCHG128) { - TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx); + MemOpIdx oi = make_memop_idx(MO_TE | MO_128 | MO_ALIGN, mem_idx); ov = cpu_atomic_cmpxchgo_be_mmu(env, a1, cv, nv, oi, ra); cc = !int128_eq(ov, cv); } else { @@ -1979,7 +1970,7 @@ static uint32_t do_csst(CPUS390XState *env, uint32_t r3, uint64_t a1, cpu_stq_data_ra(env, a2 + 0, svh, ra); cpu_stq_data_ra(env, a2 + 8, svl, ra); } else if (HAVE_ATOMIC128) { - TCGMemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx); + MemOpIdx oi = make_memop_idx(MO_TEQ | MO_ALIGN_16, mem_idx); Int128 sv = int128_make128(svl, svh); cpu_atomic_sto_be_mmu(env, a2, sv, oi, ra); } else { @@ -2497,7 +2488,7 @@ uint64_t HELPER(lpq_parallel)(CPUS390XState *env, uint64_t addr) uintptr_t ra = GETPC(); uint64_t hi, lo; int mem_idx; - TCGMemOpIdx oi; + MemOpIdx oi; Int128 v; assert(HAVE_ATOMIC128); @@ -2528,7 +2519,7 @@ void HELPER(stpq_parallel)(CPUS390XState *env, uint64_t addr, { uintptr_t ra = GETPC(); int mem_idx; - TCGMemOpIdx oi; + MemOpIdx oi; Int128 v; assert(HAVE_ATOMIC128); diff --git a/target/s390x/tcg/translate.c b/target/s390x/tcg/translate.c index f284870cd2..dcc249a197 100644 --- a/target/s390x/tcg/translate.c +++ b/target/s390x/tcg/translate.c @@ -138,6 +138,7 @@ struct DisasFields { struct DisasContext { DisasContextBase base; const DisasInsn *insn; + TCGOp *insn_start; DisasFields fields; uint64_t ex_value; /* @@ -148,7 +149,6 @@ struct DisasContext { uint64_t pc_tmp; uint32_t ilen; enum cc_op cc_op; - bool do_debug; }; /* Information carried about a condition to be evaluated. */ @@ -6380,8 +6380,8 @@ static DisasJumpType translate_one(CPUS390XState *env, DisasContext *s) /* Search for the insn in the table. */ insn = extract_insn(env, s); - /* Emit insn_start now that we know the ILEN. */ - tcg_gen_insn_start(s->base.pc_next, s->cc_op, s->ilen); + /* Update insn_start now that we know the ILEN. */ + tcg_set_insn_start_param(s->insn_start, 2, s->ilen); /* Not found means unimplemented/illegal opcode. */ if (insn == NULL) { @@ -6543,7 +6543,6 @@ static void s390x_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs) dc->cc_op = CC_OP_DYNAMIC; dc->ex_value = dc->base.tb->cs_base; - dc->do_debug = dc->base.singlestep_enabled; } static void s390x_tr_tb_start(DisasContextBase *db, CPUState *cs) @@ -6552,6 +6551,11 @@ static void s390x_tr_tb_start(DisasContextBase *db, CPUState *cs) static void s390x_tr_insn_start(DisasContextBase *dcbase, CPUState *cs) { + DisasContext *dc = container_of(dcbase, DisasContext, base); + + /* Delay the set of ilen until we've read the insn. */ + tcg_gen_insn_start(dc->base.pc_next, dc->cc_op, 0); + dc->insn_start = tcg_last_op(); } static void s390x_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs) @@ -6590,10 +6594,8 @@ static void s390x_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs) /* FALLTHRU */ case DISAS_PC_CC_UPDATED: /* Exit the TB, either by raising a debug exception or by return. 
*/ - if (dc->do_debug) { - gen_exception(EXCP_DEBUG); - } else if ((dc->base.tb->flags & FLAG_MASK_PER) || - dc->base.is_jmp == DISAS_PC_STALE_NOCHAIN) { + if ((dc->base.tb->flags & FLAG_MASK_PER) || + dc->base.is_jmp == DISAS_PC_STALE_NOCHAIN) { tcg_gen_exit_tb(NULL, 0); } else { tcg_gen_lookup_and_goto_ptr(); diff --git a/target/s390x/tcg/translate_vx.c.inc b/target/s390x/tcg/translate_vx.c.inc index 0afa46e463..28bf5a23b6 100644 --- a/target/s390x/tcg/translate_vx.c.inc +++ b/target/s390x/tcg/translate_vx.c.inc @@ -67,7 +67,7 @@ static void read_vec_element_i64(TCGv_i64 dst, uint8_t reg, uint8_t enr, { const int offs = vec_reg_offset(reg, enr, memop & MO_SIZE); - switch (memop) { + switch ((unsigned)memop) { case ES_8: tcg_gen_ld8u_i64(dst, cpu_env, offs); break; diff --git a/target/sh4/cpu.c b/target/sh4/cpu.c index 2047742d03..06b2691dc4 100644 --- a/target/sh4/cpu.c +++ b/target/sh4/cpu.c @@ -236,9 +236,9 @@ static const struct SysemuCPUOps sh4_sysemu_ops = { static const struct TCGCPUOps superh_tcg_ops = { .initialize = sh4_translate_init, .synchronize_from_tb = superh_cpu_synchronize_from_tb, - .tlb_fill = superh_cpu_tlb_fill, #ifndef CONFIG_USER_ONLY + .tlb_fill = superh_cpu_tlb_fill, .cpu_exec_interrupt = superh_cpu_exec_interrupt, .do_interrupt = superh_cpu_do_interrupt, .do_unaligned_access = superh_cpu_do_unaligned_access, diff --git a/target/sh4/cpu.h b/target/sh4/cpu.h index dc81406646..4cfb109f56 100644 --- a/target/sh4/cpu.h +++ b/target/sh4/cpu.h @@ -213,12 +213,12 @@ void superh_cpu_do_unaligned_access(CPUState *cpu, vaddr addr, uintptr_t retaddr) QEMU_NORETURN; void sh4_translate_init(void); +void sh4_cpu_list(void); + +#if !defined(CONFIG_USER_ONLY) bool superh_cpu_tlb_fill(CPUState *cs, vaddr address, int size, MMUAccessType access_type, int mmu_idx, bool probe, uintptr_t retaddr); - -void sh4_cpu_list(void); -#if !defined(CONFIG_USER_ONLY) void superh_cpu_do_interrupt(CPUState *cpu); bool superh_cpu_exec_interrupt(CPUState *cpu, int int_req); void cpu_sh4_invalidate_tlb(CPUSH4State *s); diff --git a/target/sh4/helper.c b/target/sh4/helper.c index 53cb9c3b63..6a620e36fc 100644 --- a/target/sh4/helper.c +++ b/target/sh4/helper.c @@ -796,8 +796,6 @@ bool superh_cpu_exec_interrupt(CPUState *cs, int interrupt_request) return false; } -#endif /* !CONFIG_USER_ONLY */ - bool superh_cpu_tlb_fill(CPUState *cs, vaddr address, int size, MMUAccessType access_type, int mmu_idx, bool probe, uintptr_t retaddr) @@ -806,11 +804,6 @@ bool superh_cpu_tlb_fill(CPUState *cs, vaddr address, int size, CPUSH4State *env = &cpu->env; int ret; -#ifdef CONFIG_USER_ONLY - ret = (access_type == MMU_DATA_STORE ? MMU_DTLB_VIOLATION_WRITE : - access_type == MMU_INST_FETCH ? 
MMU_ITLB_VIOLATION : - MMU_DTLB_VIOLATION_READ); -#else target_ulong physical; int prot; @@ -829,7 +822,6 @@ bool superh_cpu_tlb_fill(CPUState *cs, vaddr address, int size, if (ret != MMU_DTLB_MULTIPLE && ret != MMU_ITLB_MULTIPLE) { env->pteh = (env->pteh & PTEH_ASID_MASK) | (address & PTEH_VPN_MASK); } -#endif env->tea = address; switch (ret) { @@ -868,3 +860,4 @@ bool superh_cpu_tlb_fill(CPUState *cs, vaddr address, int size, } cpu_loop_exit_restore(cs, retaddr); } +#endif /* !CONFIG_USER_ONLY */ diff --git a/target/sh4/helper.h b/target/sh4/helper.h index 1e768fcbc7..8d792f6b55 100644 --- a/target/sh4/helper.h +++ b/target/sh4/helper.h @@ -3,7 +3,6 @@ DEF_HELPER_1(raise_illegal_instruction, noreturn, env) DEF_HELPER_1(raise_slot_illegal_instruction, noreturn, env) DEF_HELPER_1(raise_fpu_disable, noreturn, env) DEF_HELPER_1(raise_slot_fpu_disable, noreturn, env) -DEF_HELPER_1(debug, noreturn, env) DEF_HELPER_1(sleep, noreturn, env) DEF_HELPER_2(trapa, noreturn, env, i32) DEF_HELPER_1(exclusive, noreturn, env) diff --git a/target/sh4/op_helper.c b/target/sh4/op_helper.c index c0cbb95382..752669825f 100644 --- a/target/sh4/op_helper.c +++ b/target/sh4/op_helper.c @@ -29,6 +29,9 @@ void superh_cpu_do_unaligned_access(CPUState *cs, vaddr addr, MMUAccessType access_type, int mmu_idx, uintptr_t retaddr) { + CPUSH4State *env = cs->env_ptr; + + env->tea = addr; switch (access_type) { case MMU_INST_FETCH: case MMU_DATA_LOAD: @@ -37,6 +40,8 @@ void superh_cpu_do_unaligned_access(CPUState *cs, vaddr addr, case MMU_DATA_STORE: cs->exception_index = 0x100; break; + default: + g_assert_not_reached(); } cpu_loop_exit_restore(cs, retaddr); } @@ -81,11 +86,6 @@ void helper_raise_slot_fpu_disable(CPUSH4State *env) raise_exception(env, 0x820, 0); } -void helper_debug(CPUSH4State *env) -{ - raise_exception(env, EXCP_DEBUG, 0); -} - void helper_sleep(CPUSH4State *env) { CPUState *cs = env_cpu(env); diff --git a/target/sh4/translate.c b/target/sh4/translate.c index cf5fe9243d..ce5d674a52 100644 --- a/target/sh4/translate.c +++ b/target/sh4/translate.c @@ -240,9 +240,7 @@ static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest) tcg_gen_exit_tb(ctx->base.tb, n); } else { tcg_gen_movi_i32(cpu_pc, dest); - if (ctx->base.singlestep_enabled) { - gen_helper_debug(cpu_env); - } else if (use_exit_tb(ctx)) { + if (use_exit_tb(ctx)) { tcg_gen_exit_tb(NULL, 0); } else { tcg_gen_lookup_and_goto_ptr(); @@ -258,9 +256,7 @@ static void gen_jump(DisasContext * ctx) delayed jump as immediate jump are conditinal jumps */ tcg_gen_mov_i32(cpu_pc, cpu_delayed_pc); tcg_gen_discard_i32(cpu_delayed_pc); - if (ctx->base.singlestep_enabled) { - gen_helper_debug(cpu_env); - } else if (use_exit_tb(ctx)) { + if (use_exit_tb(ctx)) { tcg_gen_exit_tb(NULL, 0); } else { tcg_gen_lookup_and_goto_ptr(); @@ -2324,11 +2320,7 @@ static void sh4_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs) switch (ctx->base.is_jmp) { case DISAS_STOP: gen_save_cpu_state(ctx, true); - if (ctx->base.singlestep_enabled) { - gen_helper_debug(cpu_env); - } else { - tcg_gen_exit_tb(NULL, 0); - } + tcg_gen_exit_tb(NULL, 0); break; case DISAS_NEXT: case DISAS_TOO_MANY: @@ -2344,7 +2336,7 @@ static void sh4_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs) static void sh4_tr_disas_log(const DisasContextBase *dcbase, CPUState *cs) { - qemu_log("IN:\n"); /* , lookup_symbol(dcbase->pc_first)); */ + qemu_log("IN: %s\n", lookup_symbol(dcbase->pc_first)); log_target_disas(cs, dcbase->pc_first, dcbase->tb->size); } diff --git a/target/sparc/cpu.c 
b/target/sparc/cpu.c index 21dd27796d..55268ed2a1 100644 --- a/target/sparc/cpu.c +++ b/target/sparc/cpu.c @@ -865,9 +865,9 @@ static const struct SysemuCPUOps sparc_sysemu_ops = { static const struct TCGCPUOps sparc_tcg_ops = { .initialize = sparc_tcg_init, .synchronize_from_tb = sparc_cpu_synchronize_from_tb, - .tlb_fill = sparc_cpu_tlb_fill, #ifndef CONFIG_USER_ONLY + .tlb_fill = sparc_cpu_tlb_fill, .cpu_exec_interrupt = sparc_cpu_exec_interrupt, .do_interrupt = sparc_cpu_do_interrupt, .do_transaction_failed = sparc_cpu_do_transaction_failed, diff --git a/target/sparc/ldst_helper.c b/target/sparc/ldst_helper.c index 22327d7d72..a3e1cf9b6e 100644 --- a/target/sparc/ldst_helper.c +++ b/target/sparc/ldst_helper.c @@ -27,7 +27,6 @@ //#define DEBUG_MMU //#define DEBUG_MXCC -//#define DEBUG_UNALIGNED //#define DEBUG_UNASSIGNED //#define DEBUG_ASI //#define DEBUG_CACHE_CONTROL @@ -364,10 +363,6 @@ static void do_check_align(CPUSPARCState *env, target_ulong addr, uint32_t align, uintptr_t ra) { if (addr & align) { -#ifdef DEBUG_UNALIGNED - printf("Unaligned access to 0x" TARGET_FMT_lx " from 0x" TARGET_FMT_lx - "\n", addr, env->pc); -#endif cpu_raise_exception_ra(env, TT_UNALIGNED, ra); } } @@ -1318,7 +1313,7 @@ uint64_t helper_ld_asi(CPUSPARCState *env, target_ulong addr, case ASI_SNF: case ASI_SNFL: { - TCGMemOpIdx oi; + MemOpIdx oi; int idx = (env->pstate & PS_PRIV ? (asi & 1 ? MMU_KERNEL_SECONDARY_IDX : MMU_KERNEL_IDX) : (asi & 1 ? MMU_USER_SECONDARY_IDX : MMU_USER_IDX)); @@ -1333,27 +1328,27 @@ uint64_t helper_ld_asi(CPUSPARCState *env, target_ulong addr, oi = make_memop_idx(memop, idx); switch (size) { case 1: - ret = helper_ret_ldub_mmu(env, addr, oi, GETPC()); + ret = cpu_ldb_mmu(env, addr, oi, GETPC()); break; case 2: if (asi & 8) { - ret = helper_le_lduw_mmu(env, addr, oi, GETPC()); + ret = cpu_ldw_le_mmu(env, addr, oi, GETPC()); } else { - ret = helper_be_lduw_mmu(env, addr, oi, GETPC()); + ret = cpu_ldw_be_mmu(env, addr, oi, GETPC()); } break; case 4: if (asi & 8) { - ret = helper_le_ldul_mmu(env, addr, oi, GETPC()); + ret = cpu_ldl_le_mmu(env, addr, oi, GETPC()); } else { - ret = helper_be_ldul_mmu(env, addr, oi, GETPC()); + ret = cpu_ldl_be_mmu(env, addr, oi, GETPC()); } break; case 8: if (asi & 8) { - ret = helper_le_ldq_mmu(env, addr, oi, GETPC()); + ret = cpu_ldq_le_mmu(env, addr, oi, GETPC()); } else { - ret = helper_be_ldq_mmu(env, addr, oi, GETPC()); + ret = cpu_ldq_be_mmu(env, addr, oi, GETPC()); } break; default: @@ -1958,20 +1953,3 @@ void sparc_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr, is_asi, size, retaddr); } #endif - -#if !defined(CONFIG_USER_ONLY) -void QEMU_NORETURN sparc_cpu_do_unaligned_access(CPUState *cs, vaddr addr, - MMUAccessType access_type, - int mmu_idx, - uintptr_t retaddr) -{ - SPARCCPU *cpu = SPARC_CPU(cs); - CPUSPARCState *env = &cpu->env; - -#ifdef DEBUG_UNALIGNED - printf("Unaligned access to 0x" TARGET_FMT_lx " from 0x" TARGET_FMT_lx - "\n", addr, env->pc); -#endif - cpu_raise_exception_ra(env, TT_UNALIGNED, retaddr); -} -#endif diff --git a/target/sparc/meson.build b/target/sparc/meson.build index a3638b9503..a801802ee2 100644 --- a/target/sparc/meson.build +++ b/target/sparc/meson.build @@ -6,7 +6,6 @@ sparc_ss.add(files( 'gdbstub.c', 'helper.c', 'ldst_helper.c', - 'mmu_helper.c', 'translate.c', 'win_helper.c', )) @@ -16,6 +15,7 @@ sparc_ss.add(when: 'TARGET_SPARC64', if_true: files('int64_helper.c', 'vis_helpe sparc_softmmu_ss = ss.source_set() sparc_softmmu_ss.add(files( 'machine.c', + 'mmu_helper.c', 'monitor.c', )) diff --git 
a/target/sparc/mmu_helper.c b/target/sparc/mmu_helper.c index a44473a1c7..f2668389b0 100644 --- a/target/sparc/mmu_helper.c +++ b/target/sparc/mmu_helper.c @@ -25,30 +25,6 @@ /* Sparc MMU emulation */ -#if defined(CONFIG_USER_ONLY) - -bool sparc_cpu_tlb_fill(CPUState *cs, vaddr address, int size, - MMUAccessType access_type, int mmu_idx, - bool probe, uintptr_t retaddr) -{ - SPARCCPU *cpu = SPARC_CPU(cs); - CPUSPARCState *env = &cpu->env; - - if (access_type == MMU_INST_FETCH) { - cs->exception_index = TT_TFAULT; - } else { - cs->exception_index = TT_DFAULT; -#ifdef TARGET_SPARC64 - env->dmmu.mmuregs[4] = address; -#else - env->mmuregs[4] = address; -#endif - } - cpu_loop_exit_restore(cs, retaddr); -} - -#else - #ifndef TARGET_SPARC64 /* * Sparc V8 Reference MMU (SRMMU) @@ -526,16 +502,60 @@ static inline int ultrasparc_tag_match(SparcTLBEntry *tlb, return 0; } +static uint64_t build_sfsr(CPUSPARCState *env, int mmu_idx, int rw) +{ + uint64_t sfsr = SFSR_VALID_BIT; + + switch (mmu_idx) { + case MMU_PHYS_IDX: + sfsr |= SFSR_CT_NOTRANS; + break; + case MMU_USER_IDX: + case MMU_KERNEL_IDX: + sfsr |= SFSR_CT_PRIMARY; + break; + case MMU_USER_SECONDARY_IDX: + case MMU_KERNEL_SECONDARY_IDX: + sfsr |= SFSR_CT_SECONDARY; + break; + case MMU_NUCLEUS_IDX: + sfsr |= SFSR_CT_NUCLEUS; + break; + default: + g_assert_not_reached(); + } + + if (rw == 1) { + sfsr |= SFSR_WRITE_BIT; + } else if (rw == 4) { + sfsr |= SFSR_NF_BIT; + } + + if (env->pstate & PS_PRIV) { + sfsr |= SFSR_PR_BIT; + } + + if (env->dmmu.sfsr & SFSR_VALID_BIT) { /* Fault status register */ + sfsr |= SFSR_OW_BIT; /* overflow (not read before another fault) */ + } + + /* FIXME: ASI field in SFSR must be set */ + + return sfsr; +} + static int get_physical_address_data(CPUSPARCState *env, hwaddr *physical, int *prot, MemTxAttrs *attrs, target_ulong address, int rw, int mmu_idx) { CPUState *cs = env_cpu(env); unsigned int i; + uint64_t sfsr; uint64_t context; - uint64_t sfsr = 0; bool is_user = false; + sfsr = build_sfsr(env, mmu_idx, rw); + switch (mmu_idx) { case MMU_PHYS_IDX: g_assert_not_reached(); @@ -544,29 +564,18 @@ static int get_physical_address_data(CPUSPARCState *env, hwaddr *physical, /* fallthru */ case MMU_KERNEL_IDX: context = env->dmmu.mmu_primary_context & 0x1fff; - sfsr |= SFSR_CT_PRIMARY; break; case MMU_USER_SECONDARY_IDX: is_user = true; /* fallthru */ case MMU_KERNEL_SECONDARY_IDX: context = env->dmmu.mmu_secondary_context & 0x1fff; - sfsr |= SFSR_CT_SECONDARY; break; - case MMU_NUCLEUS_IDX: - sfsr |= SFSR_CT_NUCLEUS; - /* FALLTHRU */ default: context = 0; break; } - if (rw == 1) { - sfsr |= SFSR_WRITE_BIT; - } else if (rw == 4) { - sfsr |= SFSR_NF_BIT; - } - for (i = 0; i < 64; i++) { /* ctx match, vaddr match, valid? 
*/ if (ultrasparc_tag_match(&env->dtlb[i], address, context, physical)) { @@ -616,22 +625,9 @@ static int get_physical_address_data(CPUSPARCState *env, hwaddr *physical, return 0; } - if (env->dmmu.sfsr & SFSR_VALID_BIT) { /* Fault status register */ - sfsr |= SFSR_OW_BIT; /* overflow (not read before - another fault) */ - } - - if (env->pstate & PS_PRIV) { - sfsr |= SFSR_PR_BIT; - } - - /* FIXME: ASI field in SFSR must be set */ - env->dmmu.sfsr = sfsr | SFSR_VALID_BIT; - + env->dmmu.sfsr = sfsr; env->dmmu.sfar = address; /* Fault address register */ - env->dmmu.tag_access = (address & ~0x1fffULL) | context; - return 1; } } @@ -926,4 +922,23 @@ hwaddr sparc_cpu_get_phys_page_debug(CPUState *cs, vaddr addr) } return phys_addr; } + +#ifndef CONFIG_USER_ONLY +void QEMU_NORETURN sparc_cpu_do_unaligned_access(CPUState *cs, vaddr addr, + MMUAccessType access_type, + int mmu_idx, + uintptr_t retaddr) +{ + SPARCCPU *cpu = SPARC_CPU(cs); + CPUSPARCState *env = &cpu->env; + +#ifdef TARGET_SPARC64 + env->dmmu.sfsr = build_sfsr(env, mmu_idx, access_type); + env->dmmu.sfar = addr; +#else + env->mmuregs[4] = addr; #endif + + cpu_raise_exception_ra(env, TT_UNALIGNED, retaddr); +} +#endif /* !CONFIG_USER_ONLY */ diff --git a/target/tricore/helper.h b/target/tricore/helper.h index 78176aa17a..b64780c37d 100644 --- a/target/tricore/helper.h +++ b/target/tricore/helper.h @@ -153,4 +153,3 @@ DEF_HELPER_2(psw_write, void, env, i32) DEF_HELPER_1(psw_read, i32, env) /* Exceptions */ DEF_HELPER_3(raise_exception_sync, noreturn, env, i32, i32) -DEF_HELPER_2(qemu_excp, noreturn, env, i32) diff --git a/target/tricore/op_helper.c b/target/tricore/op_helper.c index 32c2bc1699..9476d10d00 100644 --- a/target/tricore/op_helper.c +++ b/target/tricore/op_helper.c @@ -107,13 +107,6 @@ static void raise_exception_sync_helper(CPUTriCoreState *env, uint32_t class, raise_exception_sync_internal(env, class, tin, pc, 0); } -void helper_qemu_excp(CPUTriCoreState *env, uint32_t excp) -{ - CPUState *cs = env_cpu(env); - cs->exception_index = excp; - cpu_loop_exit(cs); -} - /* Addressing mode helper */ static uint16_t reverse16(uint16_t val) diff --git a/target/tricore/translate.c b/target/tricore/translate.c index a0cc0f1cb3..07084407cb 100644 --- a/target/tricore/translate.c +++ b/target/tricore/translate.c @@ -3225,14 +3225,6 @@ static inline void gen_save_pc(target_ulong pc) tcg_gen_movi_tl(cpu_PC, pc); } -static void generate_qemu_excp(DisasContext *ctx, int excp) -{ - TCGv_i32 tmp = tcg_const_i32(excp); - gen_helper_qemu_excp(cpu_env, tmp); - ctx->base.is_jmp = DISAS_NORETURN; - tcg_temp_free(tmp); -} - static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest) { if (translator_use_goto_tb(&ctx->base, dest)) { @@ -3241,11 +3233,7 @@ static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest) tcg_gen_exit_tb(ctx->base.tb, n); } else { gen_save_pc(dest); - if (ctx->base.singlestep_enabled) { - generate_qemu_excp(ctx, EXCP_DEBUG); - } else { - tcg_gen_lookup_and_goto_ptr(); - } + tcg_gen_lookup_and_goto_ptr(); } } diff --git a/target/xtensa/cores.list b/target/xtensa/cores.list new file mode 100644 index 0000000000..5772a00ab2 --- /dev/null +++ b/target/xtensa/cores.list @@ -0,0 +1,9 @@ +core-dc232b.c +core-dc233c.c +core-de212.c +core-de233_fpu.c +core-dsp3400.c +core-fsf.c +core-sample_controller.c +core-test_kc705_be.c +core-test_mmuhifi_c3.c diff --git a/target/xtensa/cpu.c b/target/xtensa/cpu.c index c1cbd03595..224f723236 100644 --- a/target/xtensa/cpu.c +++ b/target/xtensa/cpu.c @@ -192,10 +192,10 @@ 
static const struct SysemuCPUOps xtensa_sysemu_ops = { static const struct TCGCPUOps xtensa_tcg_ops = { .initialize = xtensa_translate_init, - .tlb_fill = xtensa_cpu_tlb_fill, .debug_excp_handler = xtensa_breakpoint_handler, #ifndef CONFIG_USER_ONLY + .tlb_fill = xtensa_cpu_tlb_fill, .cpu_exec_interrupt = xtensa_cpu_exec_interrupt, .do_interrupt = xtensa_cpu_do_interrupt, .do_transaction_failed = xtensa_cpu_do_transaction_failed, diff --git a/target/xtensa/cpu.h b/target/xtensa/cpu.h index f9a510ca46..02143f2f77 100644 --- a/target/xtensa/cpu.h +++ b/target/xtensa/cpu.h @@ -563,10 +563,10 @@ struct XtensaCPU { }; +#ifndef CONFIG_USER_ONLY bool xtensa_cpu_tlb_fill(CPUState *cs, vaddr address, int size, MMUAccessType access_type, int mmu_idx, bool probe, uintptr_t retaddr); -#ifndef CONFIG_USER_ONLY void xtensa_cpu_do_interrupt(CPUState *cpu); bool xtensa_cpu_exec_interrupt(CPUState *cpu, int interrupt_request); void xtensa_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr, vaddr addr, diff --git a/target/xtensa/helper.c b/target/xtensa/helper.c index f18ab383fd..29d216ec1b 100644 --- a/target/xtensa/helper.c +++ b/target/xtensa/helper.c @@ -242,27 +242,7 @@ void xtensa_cpu_list(void) } } -#ifdef CONFIG_USER_ONLY - -bool xtensa_cpu_tlb_fill(CPUState *cs, vaddr address, int size, - MMUAccessType access_type, int mmu_idx, - bool probe, uintptr_t retaddr) -{ - XtensaCPU *cpu = XTENSA_CPU(cs); - CPUXtensaState *env = &cpu->env; - - qemu_log_mask(CPU_LOG_INT, - "%s: rw = %d, address = 0x%08" VADDR_PRIx ", size = %d\n", - __func__, access_type, address, size); - env->sregs[EXCVADDR] = address; - env->sregs[EXCCAUSE] = (access_type == MMU_DATA_STORE ? - STORE_PROHIBITED_CAUSE : LOAD_PROHIBITED_CAUSE); - cs->exception_index = EXC_USER; - cpu_loop_exit_restore(cs, retaddr); -} - -#else /* !CONFIG_USER_ONLY */ - +#ifndef CONFIG_USER_ONLY void xtensa_cpu_do_unaligned_access(CPUState *cs, vaddr addr, MMUAccessType access_type, int mmu_idx, uintptr_t retaddr) diff --git a/target/xtensa/import_core.sh b/target/xtensa/import_core.sh index 396b264be9..df66d09393 100755 --- a/target/xtensa/import_core.sh +++ b/target/xtensa/import_core.sh @@ -66,3 +66,6 @@ static XtensaConfig $NAME __attribute__((unused)) = { REGISTER_CORE($NAME) EOF + +grep -qxf core-${NAME}.c "$BASE"/cores.list || \ + echo core-${NAME}.c >> "$BASE"/cores.list diff --git a/target/xtensa/meson.build b/target/xtensa/meson.build index 7c4efa6c62..20bbf9b335 100644 --- a/target/xtensa/meson.build +++ b/target/xtensa/meson.build @@ -1,7 +1,7 @@ xtensa_ss = ss.source_set() -xtensa_cores = run_command('sh', '-c', 'cd $MESON_SOURCE_ROOT/$MESON_SUBDIR ; ls -1 core-*.c') -xtensa_ss.add(files(xtensa_cores.stdout().strip().split('\n'))) +xtensa_cores = fs.read('cores.list') +xtensa_ss.add(files(xtensa_cores.strip().split('\n'))) xtensa_ss.add(files( 'cpu.c', diff --git a/target/xtensa/mmu_helper.c b/target/xtensa/mmu_helper.c index b01ff9399a..57e319a1af 100644 --- a/target/xtensa/mmu_helper.c +++ b/target/xtensa/mmu_helper.c @@ -1098,7 +1098,7 @@ static void dump_tlb(CPUXtensaState *env, bool dtlb) qemu_printf("\tVaddr Paddr ASID Attr RWX Cache\n" "\t---------- ---------- ---- ---- --- -------\n"); } - qemu_printf("\t0x%08x 0x%08x 0x%02x 0x%02x %c%c%c %-7s\n", + qemu_printf("\t0x%08x 0x%08x 0x%02x 0x%02x %c%c%c %s\n", entry->vaddr, entry->paddr, entry->asid, diff --git a/target/xtensa/translate.c b/target/xtensa/translate.c index dcf6b500ef..09430c1bf9 100644 --- a/target/xtensa/translate.c +++ b/target/xtensa/translate.c @@ -382,18 +382,14 @@ 
static void gen_jump_slot(DisasContext *dc, TCGv dest, int slot) if (dc->icount) { tcg_gen_mov_i32(cpu_SR[ICOUNT], dc->next_icount); } - if (dc->base.singlestep_enabled) { - gen_exception(dc, EXCP_DEBUG); + if (dc->op_flags & XTENSA_OP_POSTPROCESS) { + slot = gen_postprocess(dc, slot); + } + if (slot >= 0) { + tcg_gen_goto_tb(slot); + tcg_gen_exit_tb(dc->base.tb, slot); } else { - if (dc->op_flags & XTENSA_OP_POSTPROCESS) { - slot = gen_postprocess(dc, slot); - } - if (slot >= 0) { - tcg_gen_goto_tb(slot); - tcg_gen_exit_tb(dc->base.tb, slot); - } else { - tcg_gen_exit_tb(NULL, 0); - } + tcg_gen_exit_tb(NULL, 0); } dc->base.is_jmp = DISAS_NORETURN; } @@ -1293,12 +1289,7 @@ static void xtensa_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu) case DISAS_NORETURN: break; case DISAS_TOO_MANY: - if (dc->base.singlestep_enabled) { - tcg_gen_movi_i32(cpu_pc, dc->pc); - gen_exception(dc, EXCP_DEBUG); - } else { - gen_jumpi(dc, dc->pc, 0); - } + gen_jumpi(dc, dc->pc, 0); break; default: g_assert_not_reached(); diff --git a/tcg/README b/tcg/README index c2e7762a37..bc15cc3b32 100644 --- a/tcg/README +++ b/tcg/README @@ -254,6 +254,12 @@ t0 = t1 ? clz(t1) : t2 t0 = t1 ? ctz(t1) : t2 +* ctpop_i32/i64 t0, t1 + +t0 = number of bits set in t1 +With "ctpop" short for "count population", matching +the function name used in include/qemu/host-utils.h. + ********* Shifts/Rotates * shl_i32/i64 t0, t1, t2 diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc index 5924977b42..5edca8d44d 100644 --- a/tcg/aarch64/tcg-target.c.inc +++ b/tcg/aarch64/tcg-target.c.inc @@ -1545,9 +1545,9 @@ static void tcg_out_cltz(TCGContext *s, TCGType ext, TCGReg d, #include "../tcg-ldst.c.inc" /* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr, - * TCGMemOpIdx oi, uintptr_t ra) + * MemOpIdx oi, uintptr_t ra) */ -static void * const qemu_ld_helpers[4] = { +static void * const qemu_ld_helpers[MO_SIZE + 1] = { [MO_8] = helper_ret_ldub_mmu, #ifdef HOST_WORDS_BIGENDIAN [MO_16] = helper_be_lduw_mmu, @@ -1561,10 +1561,10 @@ static void * const qemu_ld_helpers[4] = { }; /* helper signature: helper_ret_st_mmu(CPUState *env, target_ulong addr, - * uintxx_t val, TCGMemOpIdx oi, + * uintxx_t val, MemOpIdx oi, * uintptr_t ra) */ -static void * const qemu_st_helpers[4] = { +static void * const qemu_st_helpers[MO_SIZE + 1] = { [MO_8] = helper_ret_stb_mmu, #ifdef HOST_WORDS_BIGENDIAN [MO_16] = helper_be_stw_mmu, @@ -1586,7 +1586,7 @@ static inline void tcg_out_adr(TCGContext *s, TCGReg rd, const void *target) static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) { - TCGMemOpIdx oi = lb->oi; + MemOpIdx oi = lb->oi; MemOp opc = get_memop(oi); MemOp size = opc & MO_SIZE; @@ -1611,7 +1611,7 @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) { - TCGMemOpIdx oi = lb->oi; + MemOpIdx oi = lb->oi; MemOp opc = get_memop(oi); MemOp size = opc & MO_SIZE; @@ -1629,7 +1629,7 @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) return true; } -static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOpIdx oi, +static void add_qemu_ldst_label(TCGContext *s, bool is_ld, MemOpIdx oi, TCGType ext, TCGReg data_reg, TCGReg addr_reg, tcg_insn_unit *raddr, tcg_insn_unit *label_ptr) { @@ -1778,7 +1778,7 @@ static void tcg_out_qemu_st_direct(TCGContext *s, MemOp memop, } static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg, - TCGMemOpIdx oi, TCGType ext) 
+ MemOpIdx oi, TCGType ext) { MemOp memop = get_memop(oi); const TCGType otype = TARGET_LONG_BITS == 64 ? TCG_TYPE_I64 : TCG_TYPE_I32; @@ -1803,7 +1803,7 @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg, } static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg, - TCGMemOpIdx oi) + MemOpIdx oi) { MemOp memop = get_memop(oi); const TCGType otype = TARGET_LONG_BITS == 64 ? TCG_TYPE_I64 : TCG_TYPE_I32; diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc index d25e68b36b..633b8a37ba 100644 --- a/tcg/arm/tcg-target.c.inc +++ b/tcg/arm/tcg-target.c.inc @@ -1437,7 +1437,7 @@ static void tcg_out_vldst(TCGContext *s, ARMInsn insn, /* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr, * int mmu_idx, uintptr_t ra) */ -static void * const qemu_ld_helpers[8] = { +static void * const qemu_ld_helpers[MO_SSIZE + 1] = { [MO_UB] = helper_ret_ldub_mmu, [MO_SB] = helper_ret_ldsb_mmu, #ifdef HOST_WORDS_BIGENDIAN @@ -1458,7 +1458,7 @@ static void * const qemu_ld_helpers[8] = { /* helper signature: helper_ret_st_mmu(CPUState *env, target_ulong addr, * uintxx_t val, int mmu_idx, uintptr_t ra) */ -static void * const qemu_st_helpers[4] = { +static void * const qemu_st_helpers[MO_SIZE + 1] = { [MO_8] = helper_ret_stb_mmu, #ifdef HOST_WORDS_BIGENDIAN [MO_16] = helper_be_stw_mmu, @@ -1632,7 +1632,7 @@ static TCGReg tcg_out_tlb_read(TCGContext *s, TCGReg addrlo, TCGReg addrhi, /* Record the context of a call to the out of line helper code for the slow path for a load or store, so that we can later generate the correct helper code. */ -static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOpIdx oi, +static void add_qemu_ldst_label(TCGContext *s, bool is_ld, MemOpIdx oi, TCGReg datalo, TCGReg datahi, TCGReg addrlo, TCGReg addrhi, tcg_insn_unit *raddr, tcg_insn_unit *label_ptr) @@ -1652,7 +1652,7 @@ static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOpIdx oi, static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) { TCGReg argreg, datalo, datahi; - TCGMemOpIdx oi = lb->oi; + MemOpIdx oi = lb->oi; MemOp opc = get_memop(oi); void *func; @@ -1716,7 +1716,7 @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) { TCGReg argreg, datalo, datahi; - TCGMemOpIdx oi = lb->oi; + MemOpIdx oi = lb->oi; MemOp opc = get_memop(oi); if (!reloc_pc24(lb->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) { @@ -1846,7 +1846,7 @@ static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp opc, TCGReg datalo, static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64) { TCGReg addrlo, datalo, datahi, addrhi __attribute__((unused)); - TCGMemOpIdx oi; + MemOpIdx oi; MemOp opc; #ifdef CONFIG_SOFTMMU int mem_index; @@ -1952,7 +1952,7 @@ static void tcg_out_qemu_st_direct(TCGContext *s, MemOp opc, TCGReg datalo, static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64) { TCGReg addrlo, datalo, datahi, addrhi __attribute__((unused)); - TCGMemOpIdx oi; + MemOpIdx oi; MemOp opc; #ifdef CONFIG_SOFTMMU int mem_index; diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc index 997510109d..84b109bb84 100644 --- a/tcg/i386/tcg-target.c.inc +++ b/tcg/i386/tcg-target.c.inc @@ -1611,7 +1611,7 @@ static void tcg_out_nopn(TCGContext *s, int n) /* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr, * int mmu_idx, uintptr_t ra) */ -static void * const qemu_ld_helpers[16] = { +static void * 
const qemu_ld_helpers[(MO_SIZE | MO_BSWAP) + 1] = { [MO_UB] = helper_ret_ldub_mmu, [MO_LEUW] = helper_le_lduw_mmu, [MO_LEUL] = helper_le_ldul_mmu, @@ -1624,7 +1624,7 @@ static void * const qemu_ld_helpers[16] = { /* helper signature: helper_ret_st_mmu(CPUState *env, target_ulong addr, * uintxx_t val, int mmu_idx, uintptr_t ra) */ -static void * const qemu_st_helpers[16] = { +static void * const qemu_st_helpers[(MO_SIZE | MO_BSWAP) + 1] = { [MO_UB] = helper_ret_stb_mmu, [MO_LEUW] = helper_le_stw_mmu, [MO_LEUL] = helper_le_stl_mmu, @@ -1741,7 +1741,7 @@ static inline void tcg_out_tlb_load(TCGContext *s, TCGReg addrlo, TCGReg addrhi, * for a load or store, so that we can later generate the correct helper code */ static void add_qemu_ldst_label(TCGContext *s, bool is_ld, bool is_64, - TCGMemOpIdx oi, + MemOpIdx oi, TCGReg datalo, TCGReg datahi, TCGReg addrlo, TCGReg addrhi, tcg_insn_unit *raddr, @@ -1768,7 +1768,7 @@ static void add_qemu_ldst_label(TCGContext *s, bool is_ld, bool is_64, */ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l) { - TCGMemOpIdx oi = l->oi; + MemOpIdx oi = l->oi; MemOp opc = get_memop(oi); TCGReg data_reg; tcg_insn_unit **label_ptr = &l->label_ptr[0]; @@ -1853,7 +1853,7 @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l) */ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l) { - TCGMemOpIdx oi = l->oi; + MemOpIdx oi = l->oi; MemOp opc = get_memop(oi); MemOp s_bits = opc & MO_SIZE; tcg_insn_unit **label_ptr = &l->label_ptr[0]; @@ -2054,7 +2054,7 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64) { TCGReg datalo, datahi, addrlo; TCGReg addrhi __attribute__((unused)); - TCGMemOpIdx oi; + MemOpIdx oi; MemOp opc; #if defined(CONFIG_SOFTMMU) int mem_index; @@ -2143,7 +2143,7 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64) { TCGReg datalo, datahi, addrlo; TCGReg addrhi __attribute__((unused)); - TCGMemOpIdx oi; + MemOpIdx oi; MemOp opc; #if defined(CONFIG_SOFTMMU) int mem_index; diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc index 41ffa28394..d8f6914f03 100644 --- a/tcg/mips/tcg-target.c.inc +++ b/tcg/mips/tcg-target.c.inc @@ -1017,7 +1017,7 @@ static void tcg_out_call(TCGContext *s, const tcg_insn_unit *arg) #if defined(CONFIG_SOFTMMU) #include "../tcg-ldst.c.inc" -static void * const qemu_ld_helpers[16] = { +static void * const qemu_ld_helpers[(MO_SSIZE | MO_BSWAP) + 1] = { [MO_UB] = helper_ret_ldub_mmu, [MO_SB] = helper_ret_ldsb_mmu, [MO_LEUW] = helper_le_lduw_mmu, @@ -1034,7 +1034,7 @@ static void * const qemu_ld_helpers[16] = { #endif }; -static void * const qemu_st_helpers[16] = { +static void * const qemu_st_helpers[(MO_SIZE | MO_BSWAP) + 1] = { [MO_UB] = helper_ret_stb_mmu, [MO_LEUW] = helper_le_stw_mmu, [MO_LEUL] = helper_le_stl_mmu, @@ -1120,7 +1120,7 @@ QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -32768); * Clobbers TMP0, TMP1, TMP2, TMP3. 
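The recurring change in these backends is mechanical: TCGMemOpIdx becomes MemOpIdx, and the slow-path helper tables are dimensioned from the MemOp bits they are indexed by instead of bare 4/8/16. A hedged sketch of what such an index packs, assuming the make_memop_idx()/get_memop()/get_mmuidx() accessors from the QEMU tree (illustrative only):

    static void memop_idx_demo(unsigned mmu_idx)
    {
        MemOpIdx oi  = make_memop_idx(MO_LEUL, mmu_idx); /* 32-bit LE access */
        MemOp    mop = get_memop(oi);                    /* back to MO_LEUL  */
        unsigned idx = get_mmuidx(oi);                   /* back to mmu_idx  */

        /* The size/sign/endian bits of mop are exactly what bounds the
         * helper arrays: (MO_SIZE | MO_BSWAP) + 1 slots for stores, and
         * (MO_SSIZE | MO_BSWAP) + 1 where sign-extending loads exist. */
        (void)mop; (void)idx;
    }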
*/ static void tcg_out_tlb_load(TCGContext *s, TCGReg base, TCGReg addrl, - TCGReg addrh, TCGMemOpIdx oi, + TCGReg addrh, MemOpIdx oi, tcg_insn_unit *label_ptr[2], bool is_load) { MemOp opc = get_memop(oi); @@ -1196,7 +1196,7 @@ static void tcg_out_tlb_load(TCGContext *s, TCGReg base, TCGReg addrl, tcg_out_opc_reg(s, ALIAS_PADD, base, TCG_TMP2, addrl); } -static void add_qemu_ldst_label(TCGContext *s, int is_ld, TCGMemOpIdx oi, +static void add_qemu_ldst_label(TCGContext *s, int is_ld, MemOpIdx oi, TCGType ext, TCGReg datalo, TCGReg datahi, TCGReg addrlo, TCGReg addrhi, @@ -1221,7 +1221,7 @@ static void add_qemu_ldst_label(TCGContext *s, int is_ld, TCGMemOpIdx oi, static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l) { const tcg_insn_unit *tgt_rx = tcg_splitwx_to_rx(s->code_ptr); - TCGMemOpIdx oi = l->oi; + MemOpIdx oi = l->oi; MemOp opc = get_memop(oi); TCGReg v0; int i; @@ -1275,7 +1275,7 @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l) static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l) { const tcg_insn_unit *tgt_rx = tcg_splitwx_to_rx(s->code_ptr); - TCGMemOpIdx oi = l->oi; + MemOpIdx oi = l->oi; MemOp opc = get_memop(oi); MemOp s_bits = opc & MO_SIZE; int i; @@ -1434,7 +1434,7 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64) { TCGReg addr_regl, addr_regh __attribute__((unused)); TCGReg data_regl, data_regh; - TCGMemOpIdx oi; + MemOpIdx oi; MemOp opc; #if defined(CONFIG_SOFTMMU) tcg_insn_unit *label_ptr[2]; @@ -1536,7 +1536,7 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64) { TCGReg addr_regl, addr_regh __attribute__((unused)); TCGReg data_regl, data_regh; - TCGMemOpIdx oi; + MemOpIdx oi; MemOp opc; #if defined(CONFIG_SOFTMMU) tcg_insn_unit *label_ptr[2]; diff --git a/tcg/optimize.c b/tcg/optimize.c index 9876ac52a8..2397f2cf93 100644 --- a/tcg/optimize.c +++ b/tcg/optimize.c @@ -24,6 +24,7 @@ */ #include "qemu/osdep.h" +#include "qemu/int128.h" #include "tcg/tcg-op.h" #include "tcg-internal.h" @@ -41,9 +42,61 @@ typedef struct TempOptInfo { TCGTemp *prev_copy; TCGTemp *next_copy; uint64_t val; - uint64_t mask; + uint64_t z_mask; /* mask bit is 0 if and only if value bit is 0 */ + uint64_t s_mask; /* a left-aligned mask of clrsb(value) bits. */ } TempOptInfo; +typedef struct OptContext { + TCGContext *tcg; + TCGOp *prev_mb; + TCGTempSet temps_used; + + /* In flight values from optimization. */ + uint64_t a_mask; /* mask bit is 0 iff value identical to first input */ + uint64_t z_mask; /* mask bit is 0 iff value bit is 0 */ + uint64_t s_mask; /* mask of clrsb(value) bits */ + TCGType type; +} OptContext; + +/* Calculate the smask for a specific value. */ +static uint64_t smask_from_value(uint64_t value) +{ + int rep = clrsb64(value); + return ~(~0ull >> rep); +} + +/* + * Calculate the smask for a given set of known-zeros. + * If there are lots of zeros on the left, we can consider the remainder + * an unsigned field, and thus the corresponding signed field is one bit + * larger. + */ +static uint64_t smask_from_zmask(uint64_t zmask) +{ + /* + * Only the 0 bits are significant for zmask, thus the msb itself + * must be zero, else we have no sign information. + */ + int rep = clz64(zmask); + if (rep == 0) { + return 0; + } + rep -= 1; + return ~(~0ull >> rep); +} + +/* + * Recreate a properly left-aligned smask after manipulation. 
+ * Some bit-shuffling, particularly shifts and rotates, may + * retain sign bits on the left, but may scatter disconnected + * sign bits on the right. Retain only what remains to the left. + */ +static uint64_t smask_from_smask(int64_t smask) +{ + /* Only the 1 bits are significant for smask */ + return smask_from_zmask(~smask); +} + static inline TempOptInfo *ts_info(TCGTemp *ts) { return ts->state_ptr; @@ -81,7 +134,8 @@ static void reset_ts(TCGTemp *ts) ti->next_copy = ts; ti->prev_copy = ts; ti->is_const = false; - ti->mask = -1; + ti->z_mask = -1; + ti->s_mask = 0; } static void reset_temp(TCGArg arg) @@ -90,15 +144,15 @@ static void reset_temp(TCGArg arg) } /* Initialize and activate a temporary. */ -static void init_ts_info(TCGTempSet *temps_used, TCGTemp *ts) +static void init_ts_info(OptContext *ctx, TCGTemp *ts) { size_t idx = temp_idx(ts); TempOptInfo *ti; - if (test_bit(idx, temps_used->l)) { + if (test_bit(idx, ctx->temps_used.l)) { return; } - set_bit(idx, temps_used->l); + set_bit(idx, ctx->temps_used.l); ti = ts->state_ptr; if (ti == NULL) { @@ -111,22 +165,15 @@ static void init_ts_info(TCGTempSet *temps_used, TCGTemp *ts) if (ts->kind == TEMP_CONST) { ti->is_const = true; ti->val = ts->val; - ti->mask = ts->val; - if (TCG_TARGET_REG_BITS > 32 && ts->type == TCG_TYPE_I32) { - /* High bits of a 32-bit quantity are garbage. */ - ti->mask |= ~0xffffffffull; - } + ti->z_mask = ts->val; + ti->s_mask = smask_from_value(ts->val); } else { ti->is_const = false; - ti->mask = -1; + ti->z_mask = -1; + ti->s_mask = 0; } } -static void init_arg_info(TCGTempSet *temps_used, TCGArg arg) -{ - init_ts_info(temps_used, arg_temp(arg)); -} - static TCGTemp *find_better_copy(TCGContext *s, TCGTemp *ts) { TCGTemp *i, *g, *l; @@ -179,43 +226,45 @@ static bool args_are_copies(TCGArg arg1, TCGArg arg2) return ts_are_copies(arg_temp(arg1), arg_temp(arg2)); } -static void tcg_opt_gen_mov(TCGContext *s, TCGOp *op, TCGArg dst, TCGArg src) +static bool tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src) { TCGTemp *dst_ts = arg_temp(dst); TCGTemp *src_ts = arg_temp(src); - const TCGOpDef *def; TempOptInfo *di; TempOptInfo *si; - uint64_t mask; TCGOpcode new_op; if (ts_are_copies(dst_ts, src_ts)) { - tcg_op_remove(s, op); - return; + tcg_op_remove(ctx->tcg, op); + return true; } reset_ts(dst_ts); di = ts_info(dst_ts); si = ts_info(src_ts); - def = &tcg_op_defs[op->opc]; - if (def->flags & TCG_OPF_VECTOR) { - new_op = INDEX_op_mov_vec; - } else if (def->flags & TCG_OPF_64BIT) { - new_op = INDEX_op_mov_i64; - } else { + + switch (ctx->type) { + case TCG_TYPE_I32: new_op = INDEX_op_mov_i32; + break; + case TCG_TYPE_I64: + new_op = INDEX_op_mov_i64; + break; + case TCG_TYPE_V64: + case TCG_TYPE_V128: + case TCG_TYPE_V256: + /* TCGOP_VECL and TCGOP_VECE remain unchanged. */ + new_op = INDEX_op_mov_vec; + break; + default: + g_assert_not_reached(); } op->opc = new_op; - /* TCGOP_VECL and TCGOP_VECE remain unchanged. */ op->args[0] = dst; op->args[1] = src; - mask = si->mask; - if (TCG_TARGET_REG_BITS > 32 && new_op == INDEX_op_mov_i32) { - /* High bits of the destination are now garbage. 
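A worked example of the s_mask encoding introduced by smask_from_value()/smask_from_zmask() above; this stand-alone sketch uses the GCC/Clang builtin directly instead of QEMU's clrsb64() so it compiles on its own (illustrative only):

    #include <stdint.h>
    #include <stdio.h>

    /* Same computation as smask_from_value() above. */
    static uint64_t example_smask(uint64_t value)
    {
        int rep = __builtin_clrsbll(value);   /* redundant sign bits          */
        return ~(~0ull >> rep);               /* that many bits, left-aligned */
    }

    int main(void)
    {
        /* 0xff has 55 redundant sign bits (bits 62..8 copy bit 63), so the
         * mask keeps the top 55 bits: 0xfffffffffffffe00. */
        printf("%#llx\n", (unsigned long long)example_smask(0xff));
        return 0;
    }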
*/ - mask |= ~0xffffffffull; - } - di->mask = mask; + di->z_mask = si->z_mask; + di->s_mask = si->s_mask; if (src_ts->type == dst_ts->type) { TempOptInfo *ni = ts_info(si->next_copy); @@ -227,27 +276,22 @@ static void tcg_opt_gen_mov(TCGContext *s, TCGOp *op, TCGArg dst, TCGArg src) di->is_const = si->is_const; di->val = si->val; } + return true; } -static void tcg_opt_gen_movi(TCGContext *s, TCGTempSet *temps_used, - TCGOp *op, TCGArg dst, uint64_t val) +static bool tcg_opt_gen_movi(OptContext *ctx, TCGOp *op, + TCGArg dst, uint64_t val) { - const TCGOpDef *def = &tcg_op_defs[op->opc]; - TCGType type; TCGTemp *tv; - if (def->flags & TCG_OPF_VECTOR) { - type = TCGOP_VECL(op) + TCG_TYPE_V64; - } else if (def->flags & TCG_OPF_64BIT) { - type = TCG_TYPE_I64; - } else { - type = TCG_TYPE_I32; + if (ctx->type == TCG_TYPE_I32) { + val = (int32_t)val; } /* Convert movi to mov with constant temp. */ - tv = tcg_constant_internal(type, val); - init_ts_info(temps_used, tv); - tcg_opt_gen_mov(s, op, dst, temp_arg(tv)); + tv = tcg_constant_internal(ctx->type, val); + init_ts_info(ctx, tv); + return tcg_opt_gen_mov(ctx, op, dst, temp_arg(tv)); } static uint64_t do_constant_folding_2(TCGOpcode op, uint64_t x, uint64_t y) @@ -415,11 +459,11 @@ static uint64_t do_constant_folding_2(TCGOpcode op, uint64_t x, uint64_t y) } } -static uint64_t do_constant_folding(TCGOpcode op, uint64_t x, uint64_t y) +static uint64_t do_constant_folding(TCGOpcode op, TCGType type, + uint64_t x, uint64_t y) { - const TCGOpDef *def = &tcg_op_defs[op]; uint64_t res = do_constant_folding_2(op, x, y); - if (!(def->flags & TCG_OPF_64BIT)) { + if (type == TCG_TYPE_I32) { res = (int32_t)res; } return res; @@ -501,21 +545,25 @@ static bool do_constant_folding_cond_eq(TCGCond c) } } -/* Return 2 if the condition can't be simplified, and the result - of the condition (0 or 1) if it can */ -static TCGArg do_constant_folding_cond(TCGOpcode op, TCGArg x, - TCGArg y, TCGCond c) +/* + * Return -1 if the condition can't be simplified, + * and the result of the condition (0 or 1) if it can. + */ +static int do_constant_folding_cond(TCGType type, TCGArg x, + TCGArg y, TCGCond c) { uint64_t xv = arg_info(x)->val; uint64_t yv = arg_info(y)->val; if (arg_is_const(x) && arg_is_const(y)) { - const TCGOpDef *def = &tcg_op_defs[op]; - tcg_debug_assert(!(def->flags & TCG_OPF_VECTOR)); - if (def->flags & TCG_OPF_64BIT) { - return do_constant_folding_cond_64(xv, yv, c); - } else { + switch (type) { + case TCG_TYPE_I32: return do_constant_folding_cond_32(xv, yv, c); + case TCG_TYPE_I64: + return do_constant_folding_cond_64(xv, yv, c); + default: + /* Only scalar comparisons are optimizable */ + return -1; } } else if (args_are_copies(x, y)) { return do_constant_folding_cond_eq(c); @@ -526,15 +574,17 @@ static TCGArg do_constant_folding_cond(TCGOpcode op, TCGArg x, case TCG_COND_GEU: return 1; default: - return 2; + return -1; } } - return 2; + return -1; } -/* Return 2 if the condition can't be simplified, and the result - of the condition (0 or 1) if it can */ -static TCGArg do_constant_folding_cond2(TCGArg *p1, TCGArg *p2, TCGCond c) +/* + * Return -1 if the condition can't be simplified, + * and the result of the condition (0 or 1) if it can. 
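Both tcg_opt_gen_movi() and do_constant_folding() now truncate through (int32_t) when the pre-computed type is TCG_TYPE_I32: constants are tracked in 64 bits, and 32-bit values are kept in sign-extended canonical form. A tiny stand-alone illustration, relying only on standard C on the usual two's-complement toolchains:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t x = 0x7fffffff, y = 1;
        uint64_t res = x + y;          /* 0x0000000080000000 as a 64-bit sum */
        res = (int32_t)res;            /* canonical i32 form:
                                          0xffffffff80000000 (sign-extended) */
        printf("%#llx\n", (unsigned long long)res);
        return 0;
    }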
+ */ +static int do_constant_folding_cond2(TCGArg *p1, TCGArg *p2, TCGCond c) { TCGArg al = p1[0], ah = p1[1]; TCGArg bl = p2[0], bh = p2[1]; @@ -564,9 +614,22 @@ static TCGArg do_constant_folding_cond2(TCGArg *p1, TCGArg *p2, TCGCond c) if (args_are_copies(al, bl) && args_are_copies(ah, bh)) { return do_constant_folding_cond_eq(c); } - return 2; + return -1; } +/** + * swap_commutative: + * @dest: TCGArg of the destination argument, or NO_DEST. + * @p1: first paired argument + * @p2: second paired argument + * + * If *@p1 is a constant and *@p2 is not, swap. + * If *@p2 matches @dest, swap. + * Return true if a swap was performed. + */ + +#define NO_DEST temp_arg(NULL) + static bool swap_commutative(TCGArg dest, TCGArg *p1, TCGArg *p2) { TCGArg a1 = *p1, a2 = *p2; @@ -600,12 +663,1350 @@ static bool swap_commutative2(TCGArg *p1, TCGArg *p2) return false; } +static void init_arguments(OptContext *ctx, TCGOp *op, int nb_args) +{ + for (int i = 0; i < nb_args; i++) { + TCGTemp *ts = arg_temp(op->args[i]); + if (ts) { + init_ts_info(ctx, ts); + } + } +} + +static void copy_propagate(OptContext *ctx, TCGOp *op, + int nb_oargs, int nb_iargs) +{ + TCGContext *s = ctx->tcg; + + for (int i = nb_oargs; i < nb_oargs + nb_iargs; i++) { + TCGTemp *ts = arg_temp(op->args[i]); + if (ts && ts_is_copy(ts)) { + op->args[i] = temp_arg(find_better_copy(s, ts)); + } + } +} + +static void finish_folding(OptContext *ctx, TCGOp *op) +{ + const TCGOpDef *def = &tcg_op_defs[op->opc]; + int i, nb_oargs; + + /* + * For an opcode that ends a BB, reset all temp data. + * We do no cross-BB optimization. + */ + if (def->flags & TCG_OPF_BB_END) { + memset(&ctx->temps_used, 0, sizeof(ctx->temps_used)); + ctx->prev_mb = NULL; + return; + } + + nb_oargs = def->nb_oargs; + for (i = 0; i < nb_oargs; i++) { + TCGTemp *ts = arg_temp(op->args[i]); + reset_ts(ts); + /* + * Save the corresponding known-zero/sign bits mask for the + * first output argument (only one supported so far). + */ + if (i == 0) { + ts_info(ts)->z_mask = ctx->z_mask; + ts_info(ts)->s_mask = ctx->s_mask; + } + } +} + +/* + * The fold_* functions return true when processing is complete, + * usually by folding the operation to a constant or to a copy, + * and calling tcg_opt_gen_{mov,movi}. They may do other things, + * like collect information about the value produced, for use in + * optimizing a subsequent operation. + * + * These first fold_* functions are all helpers, used by other + * folders for more specific operations. + */ + +static bool fold_const1(OptContext *ctx, TCGOp *op) +{ + if (arg_is_const(op->args[1])) { + uint64_t t; + + t = arg_info(op->args[1])->val; + t = do_constant_folding(op->opc, ctx->type, t, 0); + return tcg_opt_gen_movi(ctx, op, op->args[0], t); + } + return false; +} + +static bool fold_const2(OptContext *ctx, TCGOp *op) +{ + if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) { + uint64_t t1 = arg_info(op->args[1])->val; + uint64_t t2 = arg_info(op->args[2])->val; + + t1 = do_constant_folding(op->opc, ctx->type, t1, t2); + return tcg_opt_gen_movi(ctx, op, op->args[0], t1); + } + return false; +} + +static bool fold_const2_commutative(OptContext *ctx, TCGOp *op) +{ + swap_commutative(op->args[0], &op->args[1], &op->args[2]); + return fold_const2(ctx, op); +} + +static bool fold_masks(OptContext *ctx, TCGOp *op) +{ + uint64_t a_mask = ctx->a_mask; + uint64_t z_mask = ctx->z_mask; + uint64_t s_mask = ctx->s_mask; + + /* + * 32-bit ops generate 32-bit results, which for the purpose of + * simplifying tcg are sign-extended. 
Certainly that's how we + * represent our constants elsewhere. Note that the bits will + * be reset properly for a 64-bit value when encountering the + * type changing opcodes. + */ + if (ctx->type == TCG_TYPE_I32) { + a_mask = (int32_t)a_mask; + z_mask = (int32_t)z_mask; + s_mask |= MAKE_64BIT_MASK(32, 32); + ctx->z_mask = z_mask; + ctx->s_mask = s_mask; + } + + if (z_mask == 0) { + return tcg_opt_gen_movi(ctx, op, op->args[0], 0); + } + if (a_mask == 0) { + return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]); + } + return false; +} + +/* + * Convert @op to NOT, if NOT is supported by the host. + * Return true if the conversion is successful, which will still + * indicate that the processing is complete. + */ +static bool fold_not(OptContext *ctx, TCGOp *op); +static bool fold_to_not(OptContext *ctx, TCGOp *op, int idx) +{ + TCGOpcode not_op; + bool have_not; + + switch (ctx->type) { + case TCG_TYPE_I32: + not_op = INDEX_op_not_i32; + have_not = TCG_TARGET_HAS_not_i32; + break; + case TCG_TYPE_I64: + not_op = INDEX_op_not_i64; + have_not = TCG_TARGET_HAS_not_i64; + break; + case TCG_TYPE_V64: + case TCG_TYPE_V128: + case TCG_TYPE_V256: + not_op = INDEX_op_not_vec; + have_not = TCG_TARGET_HAS_not_vec; + break; + default: + g_assert_not_reached(); + } + if (have_not) { + op->opc = not_op; + op->args[1] = op->args[idx]; + return fold_not(ctx, op); + } + return false; +} + +/* If the binary operation has first argument @i, fold to @i. */ +static bool fold_ix_to_i(OptContext *ctx, TCGOp *op, uint64_t i) +{ + if (arg_is_const(op->args[1]) && arg_info(op->args[1])->val == i) { + return tcg_opt_gen_movi(ctx, op, op->args[0], i); + } + return false; +} + +/* If the binary operation has first argument @i, fold to NOT. */ +static bool fold_ix_to_not(OptContext *ctx, TCGOp *op, uint64_t i) +{ + if (arg_is_const(op->args[1]) && arg_info(op->args[1])->val == i) { + return fold_to_not(ctx, op, 2); + } + return false; +} + +/* If the binary operation has second argument @i, fold to @i. */ +static bool fold_xi_to_i(OptContext *ctx, TCGOp *op, uint64_t i) +{ + if (arg_is_const(op->args[2]) && arg_info(op->args[2])->val == i) { + return tcg_opt_gen_movi(ctx, op, op->args[0], i); + } + return false; +} + +/* If the binary operation has second argument @i, fold to identity. */ +static bool fold_xi_to_x(OptContext *ctx, TCGOp *op, uint64_t i) +{ + if (arg_is_const(op->args[2]) && arg_info(op->args[2])->val == i) { + return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]); + } + return false; +} + +/* If the binary operation has second argument @i, fold to NOT. */ +static bool fold_xi_to_not(OptContext *ctx, TCGOp *op, uint64_t i) +{ + if (arg_is_const(op->args[2]) && arg_info(op->args[2])->val == i) { + return fold_to_not(ctx, op, 1); + } + return false; +} + +/* If the binary operation has both arguments equal, fold to @i. */ +static bool fold_xx_to_i(OptContext *ctx, TCGOp *op, uint64_t i) +{ + if (args_are_copies(op->args[1], op->args[2])) { + return tcg_opt_gen_movi(ctx, op, op->args[0], i); + } + return false; +} + +/* If the binary operation has both arguments equal, fold to identity. */ +static bool fold_xx_to_x(OptContext *ctx, TCGOp *op) +{ + if (args_are_copies(op->args[1], op->args[2])) { + return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]); + } + return false; +} + +/* + * These outermost fold_ functions are sorted alphabetically. 
+ * + * The ordering of the transformations should be: + * 1) those that produce a constant + * 2) those that produce a copy + * 3) those that produce information about the result value. + */ + +static bool fold_add(OptContext *ctx, TCGOp *op) +{ + if (fold_const2_commutative(ctx, op) || + fold_xi_to_x(ctx, op, 0)) { + return true; + } + return false; +} + +static bool fold_addsub2(OptContext *ctx, TCGOp *op, bool add) +{ + if (arg_is_const(op->args[2]) && arg_is_const(op->args[3]) && + arg_is_const(op->args[4]) && arg_is_const(op->args[5])) { + uint64_t al = arg_info(op->args[2])->val; + uint64_t ah = arg_info(op->args[3])->val; + uint64_t bl = arg_info(op->args[4])->val; + uint64_t bh = arg_info(op->args[5])->val; + TCGArg rl, rh; + TCGOp *op2; + + if (ctx->type == TCG_TYPE_I32) { + uint64_t a = deposit64(al, 32, 32, ah); + uint64_t b = deposit64(bl, 32, 32, bh); + + if (add) { + a += b; + } else { + a -= b; + } + + al = sextract64(a, 0, 32); + ah = sextract64(a, 32, 32); + } else { + Int128 a = int128_make128(al, ah); + Int128 b = int128_make128(bl, bh); + + if (add) { + a = int128_add(a, b); + } else { + a = int128_sub(a, b); + } + + al = int128_getlo(a); + ah = int128_gethi(a); + } + + rl = op->args[0]; + rh = op->args[1]; + + /* The proper opcode is supplied by tcg_opt_gen_mov. */ + op2 = tcg_op_insert_before(ctx->tcg, op, 0); + + tcg_opt_gen_movi(ctx, op, rl, al); + tcg_opt_gen_movi(ctx, op2, rh, ah); + return true; + } + return false; +} + +static bool fold_add2(OptContext *ctx, TCGOp *op) +{ + /* Note that the high and low parts may be independently swapped. */ + swap_commutative(op->args[0], &op->args[2], &op->args[4]); + swap_commutative(op->args[1], &op->args[3], &op->args[5]); + + return fold_addsub2(ctx, op, true); +} + +static bool fold_and(OptContext *ctx, TCGOp *op) +{ + uint64_t z1, z2; + + if (fold_const2_commutative(ctx, op) || + fold_xi_to_i(ctx, op, 0) || + fold_xi_to_x(ctx, op, -1) || + fold_xx_to_x(ctx, op)) { + return true; + } + + z1 = arg_info(op->args[1])->z_mask; + z2 = arg_info(op->args[2])->z_mask; + ctx->z_mask = z1 & z2; + + /* + * Sign repetitions are perforce all identical, whether they are 1 or 0. + * Bitwise operations preserve the relative quantity of the repetitions. + */ + ctx->s_mask = arg_info(op->args[1])->s_mask + & arg_info(op->args[2])->s_mask; + + /* + * Known-zeros does not imply known-ones. Therefore unless + * arg2 is constant, we can't infer affected bits from it. + */ + if (arg_is_const(op->args[2])) { + ctx->a_mask = z1 & ~z2; + } + + return fold_masks(ctx, op); +} + +static bool fold_andc(OptContext *ctx, TCGOp *op) +{ + uint64_t z1; + + if (fold_const2(ctx, op) || + fold_xx_to_i(ctx, op, 0) || + fold_xi_to_x(ctx, op, 0) || + fold_ix_to_not(ctx, op, -1)) { + return true; + } + + z1 = arg_info(op->args[1])->z_mask; + + /* + * Known-zeros does not imply known-ones. Therefore unless + * arg2 is constant, we can't infer anything from it. 
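The mask bookkeeping in fold_and() feeds fold_masks(): z_mask bounds which result bits can be nonzero, and a_mask records which bits could differ from the first input. A concrete, stand-alone trace of the interesting case (illustrative only):

    #include <stdint.h>
    #include <assert.h>

    int main(void)
    {
        /* "and r, a, 0xff" where a is already known to fit in 8 bits. */
        uint64_t z1 = 0xff;             /* possibly-nonzero bits of a          */
        uint64_t z2 = 0xff;             /* possibly-nonzero bits of the mask   */
        uint64_t z_mask = z1 & z2;      /* result bits that may be nonzero     */
        uint64_t a_mask = z1 & ~z2;     /* bits where result may differ from a */

        assert(z_mask == 0xff);
        assert(a_mask == 0);            /* fold_masks(): replace with mov r, a */
        return 0;
    }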
+ */ + if (arg_is_const(op->args[2])) { + uint64_t z2 = ~arg_info(op->args[2])->z_mask; + ctx->a_mask = z1 & ~z2; + z1 &= z2; + } + ctx->z_mask = z1; + + ctx->s_mask = arg_info(op->args[1])->s_mask + & arg_info(op->args[2])->s_mask; + return fold_masks(ctx, op); +} + +static bool fold_brcond(OptContext *ctx, TCGOp *op) +{ + TCGCond cond = op->args[2]; + int i; + + if (swap_commutative(NO_DEST, &op->args[0], &op->args[1])) { + op->args[2] = cond = tcg_swap_cond(cond); + } + + i = do_constant_folding_cond(ctx->type, op->args[0], op->args[1], cond); + if (i == 0) { + tcg_op_remove(ctx->tcg, op); + return true; + } + if (i > 0) { + op->opc = INDEX_op_br; + op->args[0] = op->args[3]; + } + return false; +} + +static bool fold_brcond2(OptContext *ctx, TCGOp *op) +{ + TCGCond cond = op->args[4]; + TCGArg label = op->args[5]; + int i, inv = 0; + + if (swap_commutative2(&op->args[0], &op->args[2])) { + op->args[4] = cond = tcg_swap_cond(cond); + } + + i = do_constant_folding_cond2(&op->args[0], &op->args[2], cond); + if (i >= 0) { + goto do_brcond_const; + } + + switch (cond) { + case TCG_COND_LT: + case TCG_COND_GE: + /* + * Simplify LT/GE comparisons vs zero to a single compare + * vs the high word of the input. + */ + if (arg_is_const(op->args[2]) && arg_info(op->args[2])->val == 0 && + arg_is_const(op->args[3]) && arg_info(op->args[3])->val == 0) { + goto do_brcond_high; + } + break; + + case TCG_COND_NE: + inv = 1; + QEMU_FALLTHROUGH; + case TCG_COND_EQ: + /* + * Simplify EQ/NE comparisons where one of the pairs + * can be simplified. + */ + i = do_constant_folding_cond(TCG_TYPE_I32, op->args[0], + op->args[2], cond); + switch (i ^ inv) { + case 0: + goto do_brcond_const; + case 1: + goto do_brcond_high; + } + + i = do_constant_folding_cond(TCG_TYPE_I32, op->args[1], + op->args[3], cond); + switch (i ^ inv) { + case 0: + goto do_brcond_const; + case 1: + op->opc = INDEX_op_brcond_i32; + op->args[1] = op->args[2]; + op->args[2] = cond; + op->args[3] = label; + break; + } + break; + + default: + break; + + do_brcond_high: + op->opc = INDEX_op_brcond_i32; + op->args[0] = op->args[1]; + op->args[1] = op->args[3]; + op->args[2] = cond; + op->args[3] = label; + break; + + do_brcond_const: + if (i == 0) { + tcg_op_remove(ctx->tcg, op); + return true; + } + op->opc = INDEX_op_br; + op->args[0] = label; + break; + } + return false; +} + +static bool fold_bswap(OptContext *ctx, TCGOp *op) +{ + uint64_t z_mask, s_mask, sign; + + if (arg_is_const(op->args[1])) { + uint64_t t = arg_info(op->args[1])->val; + + t = do_constant_folding(op->opc, ctx->type, t, op->args[2]); + return tcg_opt_gen_movi(ctx, op, op->args[0], t); + } + + z_mask = arg_info(op->args[1])->z_mask; + + switch (op->opc) { + case INDEX_op_bswap16_i32: + case INDEX_op_bswap16_i64: + z_mask = bswap16(z_mask); + sign = INT16_MIN; + break; + case INDEX_op_bswap32_i32: + case INDEX_op_bswap32_i64: + z_mask = bswap32(z_mask); + sign = INT32_MIN; + break; + case INDEX_op_bswap64_i64: + z_mask = bswap64(z_mask); + sign = INT64_MIN; + break; + default: + g_assert_not_reached(); + } + s_mask = smask_from_zmask(z_mask); + + switch (op->args[2] & (TCG_BSWAP_OZ | TCG_BSWAP_OS)) { + case TCG_BSWAP_OZ: + break; + case TCG_BSWAP_OS: + /* If the sign bit may be 1, force all the bits above to 1. */ + if (z_mask & sign) { + z_mask |= sign; + s_mask = sign << 1; + } + break; + default: + /* The high bits are undefined: force all bits above the sign to 1. 
*/ + z_mask |= sign << 1; + s_mask = 0; + break; + } + ctx->z_mask = z_mask; + ctx->s_mask = s_mask; + + return fold_masks(ctx, op); +} + +static bool fold_call(OptContext *ctx, TCGOp *op) +{ + TCGContext *s = ctx->tcg; + int nb_oargs = TCGOP_CALLO(op); + int nb_iargs = TCGOP_CALLI(op); + int flags, i; + + init_arguments(ctx, op, nb_oargs + nb_iargs); + copy_propagate(ctx, op, nb_oargs, nb_iargs); + + /* If the function reads or writes globals, reset temp data. */ + flags = tcg_call_flags(op); + if (!(flags & (TCG_CALL_NO_READ_GLOBALS | TCG_CALL_NO_WRITE_GLOBALS))) { + int nb_globals = s->nb_globals; + + for (i = 0; i < nb_globals; i++) { + if (test_bit(i, ctx->temps_used.l)) { + reset_ts(&ctx->tcg->temps[i]); + } + } + } + + /* Reset temp data for outputs. */ + for (i = 0; i < nb_oargs; i++) { + reset_temp(op->args[i]); + } + + /* Stop optimizing MB across calls. */ + ctx->prev_mb = NULL; + return true; +} + +static bool fold_count_zeros(OptContext *ctx, TCGOp *op) +{ + uint64_t z_mask; + + if (arg_is_const(op->args[1])) { + uint64_t t = arg_info(op->args[1])->val; + + if (t != 0) { + t = do_constant_folding(op->opc, ctx->type, t, 0); + return tcg_opt_gen_movi(ctx, op, op->args[0], t); + } + return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[2]); + } + + switch (ctx->type) { + case TCG_TYPE_I32: + z_mask = 31; + break; + case TCG_TYPE_I64: + z_mask = 63; + break; + default: + g_assert_not_reached(); + } + ctx->z_mask = arg_info(op->args[2])->z_mask | z_mask; + ctx->s_mask = smask_from_zmask(ctx->z_mask); + return false; +} + +static bool fold_ctpop(OptContext *ctx, TCGOp *op) +{ + if (fold_const1(ctx, op)) { + return true; + } + + switch (ctx->type) { + case TCG_TYPE_I32: + ctx->z_mask = 32 | 31; + break; + case TCG_TYPE_I64: + ctx->z_mask = 64 | 63; + break; + default: + g_assert_not_reached(); + } + ctx->s_mask = smask_from_zmask(ctx->z_mask); + return false; +} + +static bool fold_deposit(OptContext *ctx, TCGOp *op) +{ + if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) { + uint64_t t1 = arg_info(op->args[1])->val; + uint64_t t2 = arg_info(op->args[2])->val; + + t1 = deposit64(t1, op->args[3], op->args[4], t2); + return tcg_opt_gen_movi(ctx, op, op->args[0], t1); + } + + ctx->z_mask = deposit64(arg_info(op->args[1])->z_mask, + op->args[3], op->args[4], + arg_info(op->args[2])->z_mask); + return false; +} + +static bool fold_divide(OptContext *ctx, TCGOp *op) +{ + if (fold_const2(ctx, op) || + fold_xi_to_x(ctx, op, 1)) { + return true; + } + return false; +} + +static bool fold_dup(OptContext *ctx, TCGOp *op) +{ + if (arg_is_const(op->args[1])) { + uint64_t t = arg_info(op->args[1])->val; + t = dup_const(TCGOP_VECE(op), t); + return tcg_opt_gen_movi(ctx, op, op->args[0], t); + } + return false; +} + +static bool fold_dup2(OptContext *ctx, TCGOp *op) +{ + if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) { + uint64_t t = deposit64(arg_info(op->args[1])->val, 32, 32, + arg_info(op->args[2])->val); + return tcg_opt_gen_movi(ctx, op, op->args[0], t); + } + + if (args_are_copies(op->args[1], op->args[2])) { + op->opc = INDEX_op_dup_vec; + TCGOP_VECE(op) = MO_32; + } + return false; +} + +static bool fold_eqv(OptContext *ctx, TCGOp *op) +{ + if (fold_const2_commutative(ctx, op) || + fold_xi_to_x(ctx, op, -1) || + fold_xi_to_not(ctx, op, 0)) { + return true; + } + + ctx->s_mask = arg_info(op->args[1])->s_mask + & arg_info(op->args[2])->s_mask; + return false; +} + +static bool fold_extract(OptContext *ctx, TCGOp *op) +{ + uint64_t z_mask_old, z_mask; + int pos = 
op->args[2]; + int len = op->args[3]; + + if (arg_is_const(op->args[1])) { + uint64_t t; + + t = arg_info(op->args[1])->val; + t = extract64(t, pos, len); + return tcg_opt_gen_movi(ctx, op, op->args[0], t); + } + + z_mask_old = arg_info(op->args[1])->z_mask; + z_mask = extract64(z_mask_old, pos, len); + if (pos == 0) { + ctx->a_mask = z_mask_old ^ z_mask; + } + ctx->z_mask = z_mask; + ctx->s_mask = smask_from_zmask(z_mask); + + return fold_masks(ctx, op); +} + +static bool fold_extract2(OptContext *ctx, TCGOp *op) +{ + if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) { + uint64_t v1 = arg_info(op->args[1])->val; + uint64_t v2 = arg_info(op->args[2])->val; + int shr = op->args[3]; + + if (op->opc == INDEX_op_extract2_i64) { + v1 >>= shr; + v2 <<= 64 - shr; + } else { + v1 = (uint32_t)v1 >> shr; + v2 = (uint64_t)((int32_t)v2 << (32 - shr)); + } + return tcg_opt_gen_movi(ctx, op, op->args[0], v1 | v2); + } + return false; +} + +static bool fold_exts(OptContext *ctx, TCGOp *op) +{ + uint64_t s_mask_old, s_mask, z_mask, sign; + bool type_change = false; + + if (fold_const1(ctx, op)) { + return true; + } + + z_mask = arg_info(op->args[1])->z_mask; + s_mask = arg_info(op->args[1])->s_mask; + s_mask_old = s_mask; + + switch (op->opc) { + CASE_OP_32_64(ext8s): + sign = INT8_MIN; + z_mask = (uint8_t)z_mask; + break; + CASE_OP_32_64(ext16s): + sign = INT16_MIN; + z_mask = (uint16_t)z_mask; + break; + case INDEX_op_ext_i32_i64: + type_change = true; + QEMU_FALLTHROUGH; + case INDEX_op_ext32s_i64: + sign = INT32_MIN; + z_mask = (uint32_t)z_mask; + break; + default: + g_assert_not_reached(); + } + + if (z_mask & sign) { + z_mask |= sign; + } + s_mask |= sign << 1; + + ctx->z_mask = z_mask; + ctx->s_mask = s_mask; + if (!type_change) { + ctx->a_mask = s_mask & ~s_mask_old; + } + + return fold_masks(ctx, op); +} + +static bool fold_extu(OptContext *ctx, TCGOp *op) +{ + uint64_t z_mask_old, z_mask; + bool type_change = false; + + if (fold_const1(ctx, op)) { + return true; + } + + z_mask_old = z_mask = arg_info(op->args[1])->z_mask; + + switch (op->opc) { + CASE_OP_32_64(ext8u): + z_mask = (uint8_t)z_mask; + break; + CASE_OP_32_64(ext16u): + z_mask = (uint16_t)z_mask; + break; + case INDEX_op_extrl_i64_i32: + case INDEX_op_extu_i32_i64: + type_change = true; + QEMU_FALLTHROUGH; + case INDEX_op_ext32u_i64: + z_mask = (uint32_t)z_mask; + break; + case INDEX_op_extrh_i64_i32: + type_change = true; + z_mask >>= 32; + break; + default: + g_assert_not_reached(); + } + + ctx->z_mask = z_mask; + ctx->s_mask = smask_from_zmask(z_mask); + if (!type_change) { + ctx->a_mask = z_mask_old ^ z_mask; + } + return fold_masks(ctx, op); +} + +static bool fold_mb(OptContext *ctx, TCGOp *op) +{ + /* Eliminate duplicate and redundant fence instructions. */ + if (ctx->prev_mb) { + /* + * Merge two barriers of the same type into one, + * or a weaker barrier into a stronger one, + * or two weaker barriers into a stronger one. + * mb X; mb Y => mb X|Y + * mb; strl => mb; st + * ldaq; mb => ld; mb + * ldaq; strl => ld; mb; st + * Other combinations are also merged into a strong + * barrier. This is stricter than specified but for + * the purposes of TCG is better than not optimizing. 
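The merge performed by fold_mb() is simply an OR of the pending barrier's flags into the previous one. A sketch of the rules listed above, assuming the TCG_MO_* constants from tcg/tcg-mo.h (illustrative only):

    static unsigned merge_barriers_demo(void)
    {
        unsigned acq = TCG_MO_LD_LD | TCG_MO_LD_ST; /* acquire: earlier loads
                                                       ordered before later ops  */
        unsigned rel = TCG_MO_LD_ST | TCG_MO_ST_ST; /* release: earlier ops
                                                       ordered before later stores */
        return acq | rel;     /* what prev_mb->args[0] |= op->args[0] produces */
    }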
+ */ + ctx->prev_mb->args[0] |= op->args[0]; + tcg_op_remove(ctx->tcg, op); + } else { + ctx->prev_mb = op; + } + return true; +} + +static bool fold_mov(OptContext *ctx, TCGOp *op) +{ + return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]); +} + +static bool fold_movcond(OptContext *ctx, TCGOp *op) +{ + TCGCond cond = op->args[5]; + int i; + + if (swap_commutative(NO_DEST, &op->args[1], &op->args[2])) { + op->args[5] = cond = tcg_swap_cond(cond); + } + /* + * Canonicalize the "false" input reg to match the destination reg so + * that the tcg backend can implement a "move if true" operation. + */ + if (swap_commutative(op->args[0], &op->args[4], &op->args[3])) { + op->args[5] = cond = tcg_invert_cond(cond); + } + + i = do_constant_folding_cond(ctx->type, op->args[1], op->args[2], cond); + if (i >= 0) { + return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[4 - i]); + } + + ctx->z_mask = arg_info(op->args[3])->z_mask + | arg_info(op->args[4])->z_mask; + ctx->s_mask = arg_info(op->args[3])->s_mask + & arg_info(op->args[4])->s_mask; + + if (arg_is_const(op->args[3]) && arg_is_const(op->args[4])) { + uint64_t tv = arg_info(op->args[3])->val; + uint64_t fv = arg_info(op->args[4])->val; + TCGOpcode opc; + + switch (ctx->type) { + case TCG_TYPE_I32: + opc = INDEX_op_setcond_i32; + break; + case TCG_TYPE_I64: + opc = INDEX_op_setcond_i64; + break; + default: + g_assert_not_reached(); + } + + if (tv == 1 && fv == 0) { + op->opc = opc; + op->args[3] = cond; + } else if (fv == 1 && tv == 0) { + op->opc = opc; + op->args[3] = tcg_invert_cond(cond); + } + } + return false; +} + +static bool fold_mul(OptContext *ctx, TCGOp *op) +{ + if (fold_const2(ctx, op) || + fold_xi_to_i(ctx, op, 0) || + fold_xi_to_x(ctx, op, 1)) { + return true; + } + return false; +} + +static bool fold_mul_highpart(OptContext *ctx, TCGOp *op) +{ + if (fold_const2_commutative(ctx, op) || + fold_xi_to_i(ctx, op, 0)) { + return true; + } + return false; +} + +static bool fold_multiply2(OptContext *ctx, TCGOp *op) +{ + swap_commutative(op->args[0], &op->args[2], &op->args[3]); + + if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])) { + uint64_t a = arg_info(op->args[2])->val; + uint64_t b = arg_info(op->args[3])->val; + uint64_t h, l; + TCGArg rl, rh; + TCGOp *op2; + + switch (op->opc) { + case INDEX_op_mulu2_i32: + l = (uint64_t)(uint32_t)a * (uint32_t)b; + h = (int32_t)(l >> 32); + l = (int32_t)l; + break; + case INDEX_op_muls2_i32: + l = (int64_t)(int32_t)a * (int32_t)b; + h = l >> 32; + l = (int32_t)l; + break; + case INDEX_op_mulu2_i64: + mulu64(&l, &h, a, b); + break; + case INDEX_op_muls2_i64: + muls64(&l, &h, a, b); + break; + default: + g_assert_not_reached(); + } + + rl = op->args[0]; + rh = op->args[1]; + + /* The proper opcode is supplied by tcg_opt_gen_mov. */ + op2 = tcg_op_insert_before(ctx->tcg, op, 0); + + tcg_opt_gen_movi(ctx, op, rl, l); + tcg_opt_gen_movi(ctx, op2, rh, h); + return true; + } + return false; +} + +static bool fold_nand(OptContext *ctx, TCGOp *op) +{ + if (fold_const2_commutative(ctx, op) || + fold_xi_to_not(ctx, op, -1)) { + return true; + } + + ctx->s_mask = arg_info(op->args[1])->s_mask + & arg_info(op->args[2])->s_mask; + return false; +} + +static bool fold_neg(OptContext *ctx, TCGOp *op) +{ + uint64_t z_mask; + + if (fold_const1(ctx, op)) { + return true; + } + + /* Set to 1 all bits to the left of the rightmost. 
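The negation rule that follows ("set to 1 all bits to the left of the rightmost") can be read as: if the input is a multiple of 2^k, so is its negation, hence only bits k and above may be set in the result. A stand-alone check (illustrative only):

    #include <stdint.h>
    #include <assert.h>

    int main(void)
    {
        uint64_t z_mask = 0x30;              /* input may only have bits 4,5 set */
        uint64_t lowest = z_mask & -z_mask;  /* 0x10: lowest possibly-set bit    */
        uint64_t neg_z  = -lowest;           /* 0xfffffffffffffff0               */

        /* e.g. -0x30 has no bits set below bit 4 */
        assert((-(uint64_t)0x30 & ~neg_z) == 0);
        return 0;
    }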
*/ + z_mask = arg_info(op->args[1])->z_mask; + ctx->z_mask = -(z_mask & -z_mask); + + /* + * Because of fold_sub_to_neg, we want to always return true, + * via finish_folding. + */ + finish_folding(ctx, op); + return true; +} + +static bool fold_nor(OptContext *ctx, TCGOp *op) +{ + if (fold_const2_commutative(ctx, op) || + fold_xi_to_not(ctx, op, 0)) { + return true; + } + + ctx->s_mask = arg_info(op->args[1])->s_mask + & arg_info(op->args[2])->s_mask; + return false; +} + +static bool fold_not(OptContext *ctx, TCGOp *op) +{ + if (fold_const1(ctx, op)) { + return true; + } + + ctx->s_mask = arg_info(op->args[1])->s_mask; + + /* Because of fold_to_not, we want to always return true, via finish. */ + finish_folding(ctx, op); + return true; +} + +static bool fold_or(OptContext *ctx, TCGOp *op) +{ + if (fold_const2_commutative(ctx, op) || + fold_xi_to_x(ctx, op, 0) || + fold_xx_to_x(ctx, op)) { + return true; + } + + ctx->z_mask = arg_info(op->args[1])->z_mask + | arg_info(op->args[2])->z_mask; + ctx->s_mask = arg_info(op->args[1])->s_mask + & arg_info(op->args[2])->s_mask; + return fold_masks(ctx, op); +} + +static bool fold_orc(OptContext *ctx, TCGOp *op) +{ + if (fold_const2(ctx, op) || + fold_xx_to_i(ctx, op, -1) || + fold_xi_to_x(ctx, op, -1) || + fold_ix_to_not(ctx, op, 0)) { + return true; + } + + ctx->s_mask = arg_info(op->args[1])->s_mask + & arg_info(op->args[2])->s_mask; + return false; +} + +static bool fold_qemu_ld(OptContext *ctx, TCGOp *op) +{ + const TCGOpDef *def = &tcg_op_defs[op->opc]; + MemOpIdx oi = op->args[def->nb_oargs + def->nb_iargs]; + MemOp mop = get_memop(oi); + int width = 8 * memop_size(mop); + + if (width < 64) { + ctx->s_mask = MAKE_64BIT_MASK(width, 64 - width); + if (!(mop & MO_SIGN)) { + ctx->z_mask = MAKE_64BIT_MASK(0, width); + ctx->s_mask <<= 1; + } + } + + /* Opcodes that touch guest memory stop the mb optimization. */ + ctx->prev_mb = NULL; + return false; +} + +static bool fold_qemu_st(OptContext *ctx, TCGOp *op) +{ + /* Opcodes that touch guest memory stop the mb optimization. */ + ctx->prev_mb = NULL; + return false; +} + +static bool fold_remainder(OptContext *ctx, TCGOp *op) +{ + if (fold_const2(ctx, op) || + fold_xx_to_i(ctx, op, 0)) { + return true; + } + return false; +} + +static bool fold_setcond(OptContext *ctx, TCGOp *op) +{ + TCGCond cond = op->args[3]; + int i; + + if (swap_commutative(op->args[0], &op->args[1], &op->args[2])) { + op->args[3] = cond = tcg_swap_cond(cond); + } + + i = do_constant_folding_cond(ctx->type, op->args[1], op->args[2], cond); + if (i >= 0) { + return tcg_opt_gen_movi(ctx, op, op->args[0], i); + } + + ctx->z_mask = 1; + ctx->s_mask = smask_from_zmask(1); + return false; +} + +static bool fold_setcond2(OptContext *ctx, TCGOp *op) +{ + TCGCond cond = op->args[5]; + int i, inv = 0; + + if (swap_commutative2(&op->args[1], &op->args[3])) { + op->args[5] = cond = tcg_swap_cond(cond); + } + + i = do_constant_folding_cond2(&op->args[1], &op->args[3], cond); + if (i >= 0) { + goto do_setcond_const; + } + + switch (cond) { + case TCG_COND_LT: + case TCG_COND_GE: + /* + * Simplify LT/GE comparisons vs zero to a single compare + * vs the high word of the input. + */ + if (arg_is_const(op->args[3]) && arg_info(op->args[3])->val == 0 && + arg_is_const(op->args[4]) && arg_info(op->args[4])->val == 0) { + goto do_setcond_high; + } + break; + + case TCG_COND_NE: + inv = 1; + QEMU_FALLTHROUGH; + case TCG_COND_EQ: + /* + * Simplify EQ/NE comparisons where one of the pairs + * can be simplified. 
+ */ + i = do_constant_folding_cond(TCG_TYPE_I32, op->args[1], + op->args[3], cond); + switch (i ^ inv) { + case 0: + goto do_setcond_const; + case 1: + goto do_setcond_high; + } + + i = do_constant_folding_cond(TCG_TYPE_I32, op->args[2], + op->args[4], cond); + switch (i ^ inv) { + case 0: + goto do_setcond_const; + case 1: + op->args[2] = op->args[3]; + op->args[3] = cond; + op->opc = INDEX_op_setcond_i32; + break; + } + break; + + default: + break; + + do_setcond_high: + op->args[1] = op->args[2]; + op->args[2] = op->args[4]; + op->args[3] = cond; + op->opc = INDEX_op_setcond_i32; + break; + } + + ctx->z_mask = 1; + ctx->s_mask = smask_from_zmask(1); + return false; + + do_setcond_const: + return tcg_opt_gen_movi(ctx, op, op->args[0], i); +} + +static bool fold_sextract(OptContext *ctx, TCGOp *op) +{ + uint64_t z_mask, s_mask, s_mask_old; + int pos = op->args[2]; + int len = op->args[3]; + + if (arg_is_const(op->args[1])) { + uint64_t t; + + t = arg_info(op->args[1])->val; + t = sextract64(t, pos, len); + return tcg_opt_gen_movi(ctx, op, op->args[0], t); + } + + z_mask = arg_info(op->args[1])->z_mask; + z_mask = sextract64(z_mask, pos, len); + ctx->z_mask = z_mask; + + s_mask_old = arg_info(op->args[1])->s_mask; + s_mask = sextract64(s_mask_old, pos, len); + s_mask |= MAKE_64BIT_MASK(len, 64 - len); + ctx->s_mask = s_mask; + + if (pos == 0) { + ctx->a_mask = s_mask & ~s_mask_old; + } + + return fold_masks(ctx, op); +} + +static bool fold_shift(OptContext *ctx, TCGOp *op) +{ + uint64_t s_mask, z_mask, sign; + + if (fold_const2(ctx, op) || + fold_ix_to_i(ctx, op, 0) || + fold_xi_to_x(ctx, op, 0)) { + return true; + } + + s_mask = arg_info(op->args[1])->s_mask; + z_mask = arg_info(op->args[1])->z_mask; + + if (arg_is_const(op->args[2])) { + int sh = arg_info(op->args[2])->val; + + ctx->z_mask = do_constant_folding(op->opc, ctx->type, z_mask, sh); + + s_mask = do_constant_folding(op->opc, ctx->type, s_mask, sh); + ctx->s_mask = smask_from_smask(s_mask); + + return fold_masks(ctx, op); + } + + switch (op->opc) { + CASE_OP_32_64(sar): + /* + * Arithmetic right shift will not reduce the number of + * input sign repetitions. + */ + ctx->s_mask = s_mask; + break; + CASE_OP_32_64(shr): + /* + * If the sign bit is known zero, then logical right shift + * will not reduce the number of input sign repetitions. 
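The sar/shr distinction just described, in a few lines of plain C on the usual arithmetic-right-shift compilers (illustrative only):

    #include <stdint.h>

    int main(void)
    {
        int64_t  v = -16;               /* many redundant sign bits              */
        int64_t  a = v >> 2;            /* -4: arithmetic shift keeps them       */
        uint64_t l = (uint64_t)v >> 2;  /* 0x3ffffffffffffffc: logical shift
                                           pulls in zeros, repetitions are lost  */
        return (a == -4 && l == 0x3ffffffffffffffcULL) ? 0 : 1;
    }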
+ */ + sign = (s_mask & -s_mask) >> 1; + if (!(z_mask & sign)) { + ctx->s_mask = s_mask; + } + break; + default: + break; + } + + return false; +} + +static bool fold_sub_to_neg(OptContext *ctx, TCGOp *op) +{ + TCGOpcode neg_op; + bool have_neg; + + if (!arg_is_const(op->args[1]) || arg_info(op->args[1])->val != 0) { + return false; + } + + switch (ctx->type) { + case TCG_TYPE_I32: + neg_op = INDEX_op_neg_i32; + have_neg = TCG_TARGET_HAS_neg_i32; + break; + case TCG_TYPE_I64: + neg_op = INDEX_op_neg_i64; + have_neg = TCG_TARGET_HAS_neg_i64; + break; + case TCG_TYPE_V64: + case TCG_TYPE_V128: + case TCG_TYPE_V256: + neg_op = INDEX_op_neg_vec; + have_neg = (TCG_TARGET_HAS_neg_vec && + tcg_can_emit_vec_op(neg_op, ctx->type, TCGOP_VECE(op)) > 0); + break; + default: + g_assert_not_reached(); + } + if (have_neg) { + op->opc = neg_op; + op->args[1] = op->args[2]; + return fold_neg(ctx, op); + } + return false; +} + +static bool fold_sub(OptContext *ctx, TCGOp *op) +{ + if (fold_const2(ctx, op) || + fold_xx_to_i(ctx, op, 0) || + fold_xi_to_x(ctx, op, 0) || + fold_sub_to_neg(ctx, op)) { + return true; + } + return false; +} + +static bool fold_sub2(OptContext *ctx, TCGOp *op) +{ + return fold_addsub2(ctx, op, false); +} + +static bool fold_tcg_ld(OptContext *ctx, TCGOp *op) +{ + /* We can't do any folding with a load, but we can record bits. */ + switch (op->opc) { + CASE_OP_32_64(ld8s): + ctx->s_mask = MAKE_64BIT_MASK(8, 56); + break; + CASE_OP_32_64(ld8u): + ctx->z_mask = MAKE_64BIT_MASK(0, 8); + ctx->s_mask = MAKE_64BIT_MASK(9, 55); + break; + CASE_OP_32_64(ld16s): + ctx->s_mask = MAKE_64BIT_MASK(16, 48); + break; + CASE_OP_32_64(ld16u): + ctx->z_mask = MAKE_64BIT_MASK(0, 16); + ctx->s_mask = MAKE_64BIT_MASK(17, 47); + break; + case INDEX_op_ld32s_i64: + ctx->s_mask = MAKE_64BIT_MASK(32, 32); + break; + case INDEX_op_ld32u_i64: + ctx->z_mask = MAKE_64BIT_MASK(0, 32); + ctx->s_mask = MAKE_64BIT_MASK(33, 31); + break; + default: + g_assert_not_reached(); + } + return false; +} + +static bool fold_xor(OptContext *ctx, TCGOp *op) +{ + if (fold_const2_commutative(ctx, op) || + fold_xx_to_i(ctx, op, 0) || + fold_xi_to_x(ctx, op, 0) || + fold_xi_to_not(ctx, op, -1)) { + return true; + } + + ctx->z_mask = arg_info(op->args[1])->z_mask + | arg_info(op->args[2])->z_mask; + ctx->s_mask = arg_info(op->args[1])->s_mask + & arg_info(op->args[2])->s_mask; + return fold_masks(ctx, op); +} + /* Propagate constants and copies, fold constant expressions. */ void tcg_optimize(TCGContext *s) { - int nb_temps, nb_globals, i; - TCGOp *op, *op_next, *prev_mb = NULL; - TCGTempSet temps_used; + int nb_temps, i; + TCGOp *op, *op_next; + OptContext ctx = { .tcg = s }; /* Array VALS has an element for each temp. If this temp holds a constant then its value is kept in VALS' element. @@ -613,991 +2014,200 @@ void tcg_optimize(TCGContext *s) available through the doubly linked circular list. */ nb_temps = s->nb_temps; - nb_globals = s->nb_globals; - - memset(&temps_used, 0, sizeof(temps_used)); for (i = 0; i < nb_temps; ++i) { s->temps[i].state_ptr = NULL; } QTAILQ_FOREACH_SAFE(op, &s->ops, link, op_next) { - uint64_t mask, partmask, affected, tmp; - int nb_oargs, nb_iargs; TCGOpcode opc = op->opc; - const TCGOpDef *def = &tcg_op_defs[opc]; + const TCGOpDef *def; + bool done = false; - /* Count the arguments, and initialize the temps that are - going to be used */ + /* Calls are special. 
*/ if (opc == INDEX_op_call) { - nb_oargs = TCGOP_CALLO(op); - nb_iargs = TCGOP_CALLI(op); - for (i = 0; i < nb_oargs + nb_iargs; i++) { - TCGTemp *ts = arg_temp(op->args[i]); - if (ts) { - init_ts_info(&temps_used, ts); - } - } + fold_call(&ctx, op); + continue; + } + + def = &tcg_op_defs[opc]; + init_arguments(&ctx, op, def->nb_oargs + def->nb_iargs); + copy_propagate(&ctx, op, def->nb_oargs, def->nb_iargs); + + /* Pre-compute the type of the operation. */ + if (def->flags & TCG_OPF_VECTOR) { + ctx.type = TCG_TYPE_V64 + TCGOP_VECL(op); + } else if (def->flags & TCG_OPF_64BIT) { + ctx.type = TCG_TYPE_I64; } else { - nb_oargs = def->nb_oargs; - nb_iargs = def->nb_iargs; - for (i = 0; i < nb_oargs + nb_iargs; i++) { - init_arg_info(&temps_used, op->args[i]); - } + ctx.type = TCG_TYPE_I32; } - /* Do copy propagation */ - for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) { - TCGTemp *ts = arg_temp(op->args[i]); - if (ts && ts_is_copy(ts)) { - op->args[i] = temp_arg(find_better_copy(s, ts)); - } - } + /* Assume all bits affected, no bits known zero, no sign reps. */ + ctx.a_mask = -1; + ctx.z_mask = -1; + ctx.s_mask = 0; - /* For commutative operations make constant second argument */ + /* + * Process each opcode. + * Sorted alphabetically by opcode as much as possible. + */ switch (opc) { CASE_OP_32_64_VEC(add): - CASE_OP_32_64_VEC(mul): - CASE_OP_32_64_VEC(and): - CASE_OP_32_64_VEC(or): - CASE_OP_32_64_VEC(xor): - CASE_OP_32_64(eqv): - CASE_OP_32_64(nand): - CASE_OP_32_64(nor): - CASE_OP_32_64(muluh): - CASE_OP_32_64(mulsh): - swap_commutative(op->args[0], &op->args[1], &op->args[2]); - break; - CASE_OP_32_64(brcond): - if (swap_commutative(-1, &op->args[0], &op->args[1])) { - op->args[2] = tcg_swap_cond(op->args[2]); - } - break; - CASE_OP_32_64(setcond): - if (swap_commutative(op->args[0], &op->args[1], &op->args[2])) { - op->args[3] = tcg_swap_cond(op->args[3]); - } - break; - CASE_OP_32_64(movcond): - if (swap_commutative(-1, &op->args[1], &op->args[2])) { - op->args[5] = tcg_swap_cond(op->args[5]); - } - /* For movcond, we canonicalize the "false" input reg to match - the destination reg so that the tcg backend can implement - a "move if true" operation. */ - if (swap_commutative(op->args[0], &op->args[4], &op->args[3])) { - op->args[5] = tcg_invert_cond(op->args[5]); - } + done = fold_add(&ctx, op); break; CASE_OP_32_64(add2): - swap_commutative(op->args[0], &op->args[2], &op->args[4]); - swap_commutative(op->args[1], &op->args[3], &op->args[5]); + done = fold_add2(&ctx, op); break; - CASE_OP_32_64(mulu2): - CASE_OP_32_64(muls2): - swap_commutative(op->args[0], &op->args[2], &op->args[3]); + CASE_OP_32_64_VEC(and): + done = fold_and(&ctx, op); + break; + CASE_OP_32_64_VEC(andc): + done = fold_andc(&ctx, op); + break; + CASE_OP_32_64(brcond): + done = fold_brcond(&ctx, op); break; case INDEX_op_brcond2_i32: - if (swap_commutative2(&op->args[0], &op->args[2])) { - op->args[4] = tcg_swap_cond(op->args[4]); - } + done = fold_brcond2(&ctx, op); break; - case INDEX_op_setcond2_i32: - if (swap_commutative2(&op->args[1], &op->args[3])) { - op->args[5] = tcg_swap_cond(op->args[5]); - } - break; - default: - break; - } - - /* Simplify expressions for "shift/rot r, 0, a => movi r, 0", - and "sub r, 0, a => neg r, a" case. 
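The block removed below did ad-hoc pattern matching that the per-opcode folders above now cover: fold_ix_to_i() for the shift/rotate-of-zero case and fold_sub_to_neg() for subtraction from zero. The underlying identities, as a stand-alone check (illustrative only):

    #include <stdint.h>
    #include <assert.h>

    int main(void)
    {
        uint32_t a = 0xdeadbeef;
        assert((UINT32_C(0) << (a & 31)) == 0);  /* shl r, $0, a  ->  movi r, $0 */
        assert(UINT32_C(0) - a == -a);           /* sub r, $0, a  ->  neg  r, a  */
        return 0;
    }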
*/ - switch (opc) { - CASE_OP_32_64(shl): - CASE_OP_32_64(shr): - CASE_OP_32_64(sar): - CASE_OP_32_64(rotl): - CASE_OP_32_64(rotr): - if (arg_is_const(op->args[1]) - && arg_info(op->args[1])->val == 0) { - tcg_opt_gen_movi(s, &temps_used, op, op->args[0], 0); - continue; - } - break; - CASE_OP_32_64_VEC(sub): - { - TCGOpcode neg_op; - bool have_neg; - - if (arg_is_const(op->args[2])) { - /* Proceed with possible constant folding. */ - break; - } - if (opc == INDEX_op_sub_i32) { - neg_op = INDEX_op_neg_i32; - have_neg = TCG_TARGET_HAS_neg_i32; - } else if (opc == INDEX_op_sub_i64) { - neg_op = INDEX_op_neg_i64; - have_neg = TCG_TARGET_HAS_neg_i64; - } else if (TCG_TARGET_HAS_neg_vec) { - TCGType type = TCGOP_VECL(op) + TCG_TYPE_V64; - unsigned vece = TCGOP_VECE(op); - neg_op = INDEX_op_neg_vec; - have_neg = tcg_can_emit_vec_op(neg_op, type, vece) > 0; - } else { - break; - } - if (!have_neg) { - break; - } - if (arg_is_const(op->args[1]) - && arg_info(op->args[1])->val == 0) { - op->opc = neg_op; - reset_temp(op->args[0]); - op->args[1] = op->args[2]; - continue; - } - } - break; - CASE_OP_32_64_VEC(xor): - CASE_OP_32_64(nand): - if (!arg_is_const(op->args[1]) - && arg_is_const(op->args[2]) - && arg_info(op->args[2])->val == -1) { - i = 1; - goto try_not; - } - break; - CASE_OP_32_64(nor): - if (!arg_is_const(op->args[1]) - && arg_is_const(op->args[2]) - && arg_info(op->args[2])->val == 0) { - i = 1; - goto try_not; - } - break; - CASE_OP_32_64_VEC(andc): - if (!arg_is_const(op->args[2]) - && arg_is_const(op->args[1]) - && arg_info(op->args[1])->val == -1) { - i = 2; - goto try_not; - } - break; - CASE_OP_32_64_VEC(orc): - CASE_OP_32_64(eqv): - if (!arg_is_const(op->args[2]) - && arg_is_const(op->args[1]) - && arg_info(op->args[1])->val == 0) { - i = 2; - goto try_not; - } - break; - try_not: - { - TCGOpcode not_op; - bool have_not; - - if (def->flags & TCG_OPF_VECTOR) { - not_op = INDEX_op_not_vec; - have_not = TCG_TARGET_HAS_not_vec; - } else if (def->flags & TCG_OPF_64BIT) { - not_op = INDEX_op_not_i64; - have_not = TCG_TARGET_HAS_not_i64; - } else { - not_op = INDEX_op_not_i32; - have_not = TCG_TARGET_HAS_not_i32; - } - if (!have_not) { - break; - } - op->opc = not_op; - reset_temp(op->args[0]); - op->args[1] = op->args[i]; - continue; - } - default: - break; - } - - /* Simplify expression for "op r, a, const => mov r, a" cases */ - switch (opc) { - CASE_OP_32_64_VEC(add): - CASE_OP_32_64_VEC(sub): - CASE_OP_32_64_VEC(or): - CASE_OP_32_64_VEC(xor): - CASE_OP_32_64_VEC(andc): - CASE_OP_32_64(shl): - CASE_OP_32_64(shr): - CASE_OP_32_64(sar): - CASE_OP_32_64(rotl): - CASE_OP_32_64(rotr): - if (!arg_is_const(op->args[1]) - && arg_is_const(op->args[2]) - && arg_info(op->args[2])->val == 0) { - tcg_opt_gen_mov(s, op, op->args[0], op->args[1]); - continue; - } - break; - CASE_OP_32_64_VEC(and): - CASE_OP_32_64_VEC(orc): - CASE_OP_32_64(eqv): - if (!arg_is_const(op->args[1]) - && arg_is_const(op->args[2]) - && arg_info(op->args[2])->val == -1) { - tcg_opt_gen_mov(s, op, op->args[0], op->args[1]); - continue; - } - break; - default: - break; - } - - /* Simplify using known-zero bits. Currently only ops with a single - output argument is supported. 
*/ - mask = -1; - affected = -1; - switch (opc) { - CASE_OP_32_64(ext8s): - if ((arg_info(op->args[1])->mask & 0x80) != 0) { - break; - } - QEMU_FALLTHROUGH; - CASE_OP_32_64(ext8u): - mask = 0xff; - goto and_const; - CASE_OP_32_64(ext16s): - if ((arg_info(op->args[1])->mask & 0x8000) != 0) { - break; - } - QEMU_FALLTHROUGH; - CASE_OP_32_64(ext16u): - mask = 0xffff; - goto and_const; - case INDEX_op_ext32s_i64: - if ((arg_info(op->args[1])->mask & 0x80000000) != 0) { - break; - } - QEMU_FALLTHROUGH; - case INDEX_op_ext32u_i64: - mask = 0xffffffffU; - goto and_const; - - CASE_OP_32_64(and): - mask = arg_info(op->args[2])->mask; - if (arg_is_const(op->args[2])) { - and_const: - affected = arg_info(op->args[1])->mask & ~mask; - } - mask = arg_info(op->args[1])->mask & mask; - break; - - case INDEX_op_ext_i32_i64: - if ((arg_info(op->args[1])->mask & 0x80000000) != 0) { - break; - } - QEMU_FALLTHROUGH; - case INDEX_op_extu_i32_i64: - /* We do not compute affected as it is a size changing op. */ - mask = (uint32_t)arg_info(op->args[1])->mask; - break; - - CASE_OP_32_64(andc): - /* Known-zeros does not imply known-ones. Therefore unless - op->args[2] is constant, we can't infer anything from it. */ - if (arg_is_const(op->args[2])) { - mask = ~arg_info(op->args[2])->mask; - goto and_const; - } - /* But we certainly know nothing outside args[1] may be set. */ - mask = arg_info(op->args[1])->mask; - break; - - case INDEX_op_sar_i32: - if (arg_is_const(op->args[2])) { - tmp = arg_info(op->args[2])->val & 31; - mask = (int32_t)arg_info(op->args[1])->mask >> tmp; - } - break; - case INDEX_op_sar_i64: - if (arg_is_const(op->args[2])) { - tmp = arg_info(op->args[2])->val & 63; - mask = (int64_t)arg_info(op->args[1])->mask >> tmp; - } - break; - - case INDEX_op_shr_i32: - if (arg_is_const(op->args[2])) { - tmp = arg_info(op->args[2])->val & 31; - mask = (uint32_t)arg_info(op->args[1])->mask >> tmp; - } - break; - case INDEX_op_shr_i64: - if (arg_is_const(op->args[2])) { - tmp = arg_info(op->args[2])->val & 63; - mask = (uint64_t)arg_info(op->args[1])->mask >> tmp; - } - break; - - case INDEX_op_extrl_i64_i32: - mask = (uint32_t)arg_info(op->args[1])->mask; - break; - case INDEX_op_extrh_i64_i32: - mask = (uint64_t)arg_info(op->args[1])->mask >> 32; - break; - - CASE_OP_32_64(shl): - if (arg_is_const(op->args[2])) { - tmp = arg_info(op->args[2])->val & (TCG_TARGET_REG_BITS - 1); - mask = arg_info(op->args[1])->mask << tmp; - } - break; - - CASE_OP_32_64(neg): - /* Set to 1 all bits to the left of the rightmost. 
*/ - mask = -(arg_info(op->args[1])->mask - & -arg_info(op->args[1])->mask); - break; - - CASE_OP_32_64(deposit): - mask = deposit64(arg_info(op->args[1])->mask, - op->args[3], op->args[4], - arg_info(op->args[2])->mask); - break; - - CASE_OP_32_64(extract): - mask = extract64(arg_info(op->args[1])->mask, - op->args[2], op->args[3]); - if (op->args[2] == 0) { - affected = arg_info(op->args[1])->mask & ~mask; - } - break; - CASE_OP_32_64(sextract): - mask = sextract64(arg_info(op->args[1])->mask, - op->args[2], op->args[3]); - if (op->args[2] == 0 && (tcg_target_long)mask >= 0) { - affected = arg_info(op->args[1])->mask & ~mask; - } - break; - - CASE_OP_32_64(or): - CASE_OP_32_64(xor): - mask = arg_info(op->args[1])->mask | arg_info(op->args[2])->mask; - break; - - case INDEX_op_clz_i32: - case INDEX_op_ctz_i32: - mask = arg_info(op->args[2])->mask | 31; - break; - - case INDEX_op_clz_i64: - case INDEX_op_ctz_i64: - mask = arg_info(op->args[2])->mask | 63; - break; - - case INDEX_op_ctpop_i32: - mask = 32 | 31; - break; - case INDEX_op_ctpop_i64: - mask = 64 | 63; - break; - - CASE_OP_32_64(setcond): - case INDEX_op_setcond2_i32: - mask = 1; - break; - - CASE_OP_32_64(movcond): - mask = arg_info(op->args[3])->mask | arg_info(op->args[4])->mask; - break; - - CASE_OP_32_64(ld8u): - mask = 0xff; - break; - CASE_OP_32_64(ld16u): - mask = 0xffff; - break; - case INDEX_op_ld32u_i64: - mask = 0xffffffffu; - break; - - CASE_OP_32_64(qemu_ld): - { - TCGMemOpIdx oi = op->args[nb_oargs + nb_iargs]; - MemOp mop = get_memop(oi); - if (!(mop & MO_SIGN)) { - mask = (2ULL << ((8 << (mop & MO_SIZE)) - 1)) - 1; - } - } - break; - - CASE_OP_32_64(bswap16): - mask = arg_info(op->args[1])->mask; - if (mask <= 0xffff) { - op->args[2] |= TCG_BSWAP_IZ; - } - mask = bswap16(mask); - switch (op->args[2] & (TCG_BSWAP_OZ | TCG_BSWAP_OS)) { - case TCG_BSWAP_OZ: - break; - case TCG_BSWAP_OS: - mask = (int16_t)mask; - break; - default: /* undefined high bits */ - mask |= MAKE_64BIT_MASK(16, 48); - break; - } - break; - - case INDEX_op_bswap32_i64: - mask = arg_info(op->args[1])->mask; - if (mask <= 0xffffffffu) { - op->args[2] |= TCG_BSWAP_IZ; - } - mask = bswap32(mask); - switch (op->args[2] & (TCG_BSWAP_OZ | TCG_BSWAP_OS)) { - case TCG_BSWAP_OZ: - break; - case TCG_BSWAP_OS: - mask = (int32_t)mask; - break; - default: /* undefined high bits */ - mask |= MAKE_64BIT_MASK(32, 32); - break; - } - break; - - default: - break; - } - - /* 32-bit ops generate 32-bit results. For the result is zero test - below, we can ignore high bits, but for further optimizations we - need to record that the high bits contain garbage. 
*/ - partmask = mask; - if (!(def->flags & TCG_OPF_64BIT)) { - mask |= ~(tcg_target_ulong)0xffffffffu; - partmask &= 0xffffffffu; - affected &= 0xffffffffu; - } - - if (partmask == 0) { - tcg_debug_assert(nb_oargs == 1); - tcg_opt_gen_movi(s, &temps_used, op, op->args[0], 0); - continue; - } - if (affected == 0) { - tcg_debug_assert(nb_oargs == 1); - tcg_opt_gen_mov(s, op, op->args[0], op->args[1]); - continue; - } - - /* Simplify expression for "op r, a, 0 => movi r, 0" cases */ - switch (opc) { - CASE_OP_32_64_VEC(and): - CASE_OP_32_64_VEC(mul): - CASE_OP_32_64(muluh): - CASE_OP_32_64(mulsh): - if (arg_is_const(op->args[2]) - && arg_info(op->args[2])->val == 0) { - tcg_opt_gen_movi(s, &temps_used, op, op->args[0], 0); - continue; - } - break; - default: - break; - } - - /* Simplify expression for "op r, a, a => mov r, a" cases */ - switch (opc) { - CASE_OP_32_64_VEC(or): - CASE_OP_32_64_VEC(and): - if (args_are_copies(op->args[1], op->args[2])) { - tcg_opt_gen_mov(s, op, op->args[0], op->args[1]); - continue; - } - break; - default: - break; - } - - /* Simplify expression for "op r, a, a => movi r, 0" cases */ - switch (opc) { - CASE_OP_32_64_VEC(andc): - CASE_OP_32_64_VEC(sub): - CASE_OP_32_64_VEC(xor): - if (args_are_copies(op->args[1], op->args[2])) { - tcg_opt_gen_movi(s, &temps_used, op, op->args[0], 0); - continue; - } - break; - default: - break; - } - - /* Propagate constants through copy operations and do constant - folding. Constants will be substituted to arguments by register - allocator where needed and possible. Also detect copies. */ - switch (opc) { - CASE_OP_32_64_VEC(mov): - tcg_opt_gen_mov(s, op, op->args[0], op->args[1]); - break; - - case INDEX_op_dup_vec: - if (arg_is_const(op->args[1])) { - tmp = arg_info(op->args[1])->val; - tmp = dup_const(TCGOP_VECE(op), tmp); - tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp); - break; - } - goto do_default; - - case INDEX_op_dup2_vec: - assert(TCG_TARGET_REG_BITS == 32); - if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) { - tcg_opt_gen_movi(s, &temps_used, op, op->args[0], - deposit64(arg_info(op->args[1])->val, 32, 32, - arg_info(op->args[2])->val)); - break; - } else if (args_are_copies(op->args[1], op->args[2])) { - op->opc = INDEX_op_dup_vec; - TCGOP_VECE(op) = MO_32; - nb_iargs = 1; - } - goto do_default; - - CASE_OP_32_64(not): - CASE_OP_32_64(neg): - CASE_OP_32_64(ext8s): - CASE_OP_32_64(ext8u): - CASE_OP_32_64(ext16s): - CASE_OP_32_64(ext16u): - CASE_OP_32_64(ctpop): - case INDEX_op_ext32s_i64: - case INDEX_op_ext32u_i64: - case INDEX_op_ext_i32_i64: - case INDEX_op_extu_i32_i64: - case INDEX_op_extrl_i64_i32: - case INDEX_op_extrh_i64_i32: - if (arg_is_const(op->args[1])) { - tmp = do_constant_folding(opc, arg_info(op->args[1])->val, 0); - tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp); - break; - } - goto do_default; - CASE_OP_32_64(bswap16): CASE_OP_32_64(bswap32): case INDEX_op_bswap64_i64: - if (arg_is_const(op->args[1])) { - tmp = do_constant_folding(opc, arg_info(op->args[1])->val, - op->args[2]); - tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp); - break; - } - goto do_default; - - CASE_OP_32_64(add): - CASE_OP_32_64(sub): - CASE_OP_32_64(mul): - CASE_OP_32_64(or): - CASE_OP_32_64(and): - CASE_OP_32_64(xor): - CASE_OP_32_64(shl): - CASE_OP_32_64(shr): - CASE_OP_32_64(sar): - CASE_OP_32_64(rotl): - CASE_OP_32_64(rotr): - CASE_OP_32_64(andc): - CASE_OP_32_64(orc): - CASE_OP_32_64(eqv): - CASE_OP_32_64(nand): - CASE_OP_32_64(nor): - CASE_OP_32_64(muluh): - CASE_OP_32_64(mulsh): - 
CASE_OP_32_64(div): - CASE_OP_32_64(divu): - CASE_OP_32_64(rem): - CASE_OP_32_64(remu): - if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) { - tmp = do_constant_folding(opc, arg_info(op->args[1])->val, - arg_info(op->args[2])->val); - tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp); - break; - } - goto do_default; - + done = fold_bswap(&ctx, op); + break; CASE_OP_32_64(clz): CASE_OP_32_64(ctz): - if (arg_is_const(op->args[1])) { - TCGArg v = arg_info(op->args[1])->val; - if (v != 0) { - tmp = do_constant_folding(opc, v, 0); - tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp); - } else { - tcg_opt_gen_mov(s, op, op->args[0], op->args[2]); - } - break; - } - goto do_default; - + done = fold_count_zeros(&ctx, op); + break; + CASE_OP_32_64(ctpop): + done = fold_ctpop(&ctx, op); + break; CASE_OP_32_64(deposit): - if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) { - tmp = deposit64(arg_info(op->args[1])->val, - op->args[3], op->args[4], - arg_info(op->args[2])->val); - tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp); - break; - } - goto do_default; - + done = fold_deposit(&ctx, op); + break; + CASE_OP_32_64(div): + CASE_OP_32_64(divu): + done = fold_divide(&ctx, op); + break; + case INDEX_op_dup_vec: + done = fold_dup(&ctx, op); + break; + case INDEX_op_dup2_vec: + done = fold_dup2(&ctx, op); + break; + CASE_OP_32_64(eqv): + done = fold_eqv(&ctx, op); + break; CASE_OP_32_64(extract): - if (arg_is_const(op->args[1])) { - tmp = extract64(arg_info(op->args[1])->val, - op->args[2], op->args[3]); - tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp); - break; - } - goto do_default; - - CASE_OP_32_64(sextract): - if (arg_is_const(op->args[1])) { - tmp = sextract64(arg_info(op->args[1])->val, - op->args[2], op->args[3]); - tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp); - break; - } - goto do_default; - + done = fold_extract(&ctx, op); + break; CASE_OP_32_64(extract2): - if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) { - uint64_t v1 = arg_info(op->args[1])->val; - uint64_t v2 = arg_info(op->args[2])->val; - int shr = op->args[3]; - - if (opc == INDEX_op_extract2_i64) { - tmp = (v1 >> shr) | (v2 << (64 - shr)); - } else { - tmp = (int32_t)(((uint32_t)v1 >> shr) | - ((uint32_t)v2 << (32 - shr))); - } - tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp); - break; - } - goto do_default; - - CASE_OP_32_64(setcond): - tmp = do_constant_folding_cond(opc, op->args[1], - op->args[2], op->args[3]); - if (tmp != 2) { - tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp); - break; - } - goto do_default; - - CASE_OP_32_64(brcond): - tmp = do_constant_folding_cond(opc, op->args[0], - op->args[1], op->args[2]); - if (tmp != 2) { - if (tmp) { - memset(&temps_used, 0, sizeof(temps_used)); - op->opc = INDEX_op_br; - op->args[0] = op->args[3]; - } else { - tcg_op_remove(s, op); - } - break; - } - goto do_default; - + done = fold_extract2(&ctx, op); + break; + CASE_OP_32_64(ext8s): + CASE_OP_32_64(ext16s): + case INDEX_op_ext32s_i64: + case INDEX_op_ext_i32_i64: + done = fold_exts(&ctx, op); + break; + CASE_OP_32_64(ext8u): + CASE_OP_32_64(ext16u): + case INDEX_op_ext32u_i64: + case INDEX_op_extu_i32_i64: + case INDEX_op_extrl_i64_i32: + case INDEX_op_extrh_i64_i32: + done = fold_extu(&ctx, op); + break; + CASE_OP_32_64(ld8s): + CASE_OP_32_64(ld8u): + CASE_OP_32_64(ld16s): + CASE_OP_32_64(ld16u): + case INDEX_op_ld32s_i64: + case INDEX_op_ld32u_i64: + done = fold_tcg_ld(&ctx, op); + break; + case INDEX_op_mb: + done = fold_mb(&ctx, op); + break; + 
CASE_OP_32_64_VEC(mov): + done = fold_mov(&ctx, op); + break; CASE_OP_32_64(movcond): - tmp = do_constant_folding_cond(opc, op->args[1], - op->args[2], op->args[5]); - if (tmp != 2) { - tcg_opt_gen_mov(s, op, op->args[0], op->args[4-tmp]); - break; - } - if (arg_is_const(op->args[3]) && arg_is_const(op->args[4])) { - uint64_t tv = arg_info(op->args[3])->val; - uint64_t fv = arg_info(op->args[4])->val; - TCGCond cond = op->args[5]; - - if (fv == 1 && tv == 0) { - cond = tcg_invert_cond(cond); - } else if (!(tv == 1 && fv == 0)) { - goto do_default; - } - op->args[3] = cond; - op->opc = opc = (opc == INDEX_op_movcond_i32 - ? INDEX_op_setcond_i32 - : INDEX_op_setcond_i64); - nb_iargs = 2; - } - goto do_default; - - case INDEX_op_add2_i32: - case INDEX_op_sub2_i32: - if (arg_is_const(op->args[2]) && arg_is_const(op->args[3]) - && arg_is_const(op->args[4]) && arg_is_const(op->args[5])) { - uint32_t al = arg_info(op->args[2])->val; - uint32_t ah = arg_info(op->args[3])->val; - uint32_t bl = arg_info(op->args[4])->val; - uint32_t bh = arg_info(op->args[5])->val; - uint64_t a = ((uint64_t)ah << 32) | al; - uint64_t b = ((uint64_t)bh << 32) | bl; - TCGArg rl, rh; - TCGOp *op2 = tcg_op_insert_before(s, op, INDEX_op_mov_i32); - - if (opc == INDEX_op_add2_i32) { - a += b; - } else { - a -= b; - } - - rl = op->args[0]; - rh = op->args[1]; - tcg_opt_gen_movi(s, &temps_used, op, rl, (int32_t)a); - tcg_opt_gen_movi(s, &temps_used, op2, rh, (int32_t)(a >> 32)); - break; - } - goto do_default; - - case INDEX_op_mulu2_i32: - if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])) { - uint32_t a = arg_info(op->args[2])->val; - uint32_t b = arg_info(op->args[3])->val; - uint64_t r = (uint64_t)a * b; - TCGArg rl, rh; - TCGOp *op2 = tcg_op_insert_before(s, op, INDEX_op_mov_i32); - - rl = op->args[0]; - rh = op->args[1]; - tcg_opt_gen_movi(s, &temps_used, op, rl, (int32_t)r); - tcg_opt_gen_movi(s, &temps_used, op2, rh, (int32_t)(r >> 32)); - break; - } - goto do_default; - - case INDEX_op_brcond2_i32: - tmp = do_constant_folding_cond2(&op->args[0], &op->args[2], - op->args[4]); - if (tmp != 2) { - if (tmp) { - do_brcond_true: - memset(&temps_used, 0, sizeof(temps_used)); - op->opc = INDEX_op_br; - op->args[0] = op->args[5]; - } else { - do_brcond_false: - tcg_op_remove(s, op); - } - } else if ((op->args[4] == TCG_COND_LT - || op->args[4] == TCG_COND_GE) - && arg_is_const(op->args[2]) - && arg_info(op->args[2])->val == 0 - && arg_is_const(op->args[3]) - && arg_info(op->args[3])->val == 0) { - /* Simplify LT/GE comparisons vs zero to a single compare - vs the high word of the input. */ - do_brcond_high: - memset(&temps_used, 0, sizeof(temps_used)); - op->opc = INDEX_op_brcond_i32; - op->args[0] = op->args[1]; - op->args[1] = op->args[3]; - op->args[2] = op->args[4]; - op->args[3] = op->args[5]; - } else if (op->args[4] == TCG_COND_EQ) { - /* Simplify EQ comparisons where one of the pairs - can be simplified. 
*/ - tmp = do_constant_folding_cond(INDEX_op_brcond_i32, - op->args[0], op->args[2], - TCG_COND_EQ); - if (tmp == 0) { - goto do_brcond_false; - } else if (tmp == 1) { - goto do_brcond_high; - } - tmp = do_constant_folding_cond(INDEX_op_brcond_i32, - op->args[1], op->args[3], - TCG_COND_EQ); - if (tmp == 0) { - goto do_brcond_false; - } else if (tmp != 1) { - goto do_default; - } - do_brcond_low: - memset(&temps_used, 0, sizeof(temps_used)); - op->opc = INDEX_op_brcond_i32; - op->args[1] = op->args[2]; - op->args[2] = op->args[4]; - op->args[3] = op->args[5]; - } else if (op->args[4] == TCG_COND_NE) { - /* Simplify NE comparisons where one of the pairs - can be simplified. */ - tmp = do_constant_folding_cond(INDEX_op_brcond_i32, - op->args[0], op->args[2], - TCG_COND_NE); - if (tmp == 0) { - goto do_brcond_high; - } else if (tmp == 1) { - goto do_brcond_true; - } - tmp = do_constant_folding_cond(INDEX_op_brcond_i32, - op->args[1], op->args[3], - TCG_COND_NE); - if (tmp == 0) { - goto do_brcond_low; - } else if (tmp == 1) { - goto do_brcond_true; - } - goto do_default; - } else { - goto do_default; - } + done = fold_movcond(&ctx, op); + break; + CASE_OP_32_64(mul): + done = fold_mul(&ctx, op); + break; + CASE_OP_32_64(mulsh): + CASE_OP_32_64(muluh): + done = fold_mul_highpart(&ctx, op); + break; + CASE_OP_32_64(muls2): + CASE_OP_32_64(mulu2): + done = fold_multiply2(&ctx, op); + break; + CASE_OP_32_64(nand): + done = fold_nand(&ctx, op); + break; + CASE_OP_32_64(neg): + done = fold_neg(&ctx, op); + break; + CASE_OP_32_64(nor): + done = fold_nor(&ctx, op); + break; + CASE_OP_32_64_VEC(not): + done = fold_not(&ctx, op); + break; + CASE_OP_32_64_VEC(or): + done = fold_or(&ctx, op); + break; + CASE_OP_32_64_VEC(orc): + done = fold_orc(&ctx, op); + break; + case INDEX_op_qemu_ld_i32: + case INDEX_op_qemu_ld_i64: + done = fold_qemu_ld(&ctx, op); + break; + case INDEX_op_qemu_st_i32: + case INDEX_op_qemu_st8_i32: + case INDEX_op_qemu_st_i64: + done = fold_qemu_st(&ctx, op); + break; + CASE_OP_32_64(rem): + CASE_OP_32_64(remu): + done = fold_remainder(&ctx, op); + break; + CASE_OP_32_64(rotl): + CASE_OP_32_64(rotr): + CASE_OP_32_64(sar): + CASE_OP_32_64(shl): + CASE_OP_32_64(shr): + done = fold_shift(&ctx, op); + break; + CASE_OP_32_64(setcond): + done = fold_setcond(&ctx, op); break; - case INDEX_op_setcond2_i32: - tmp = do_constant_folding_cond2(&op->args[1], &op->args[3], - op->args[5]); - if (tmp != 2) { - do_setcond_const: - tcg_opt_gen_movi(s, &temps_used, op, op->args[0], tmp); - } else if ((op->args[5] == TCG_COND_LT - || op->args[5] == TCG_COND_GE) - && arg_is_const(op->args[3]) - && arg_info(op->args[3])->val == 0 - && arg_is_const(op->args[4]) - && arg_info(op->args[4])->val == 0) { - /* Simplify LT/GE comparisons vs zero to a single compare - vs the high word of the input. */ - do_setcond_high: - reset_temp(op->args[0]); - arg_info(op->args[0])->mask = 1; - op->opc = INDEX_op_setcond_i32; - op->args[1] = op->args[2]; - op->args[2] = op->args[4]; - op->args[3] = op->args[5]; - } else if (op->args[5] == TCG_COND_EQ) { - /* Simplify EQ comparisons where one of the pairs - can be simplified. 
*/ - tmp = do_constant_folding_cond(INDEX_op_setcond_i32, - op->args[1], op->args[3], - TCG_COND_EQ); - if (tmp == 0) { - goto do_setcond_const; - } else if (tmp == 1) { - goto do_setcond_high; - } - tmp = do_constant_folding_cond(INDEX_op_setcond_i32, - op->args[2], op->args[4], - TCG_COND_EQ); - if (tmp == 0) { - goto do_setcond_high; - } else if (tmp != 1) { - goto do_default; - } - do_setcond_low: - reset_temp(op->args[0]); - arg_info(op->args[0])->mask = 1; - op->opc = INDEX_op_setcond_i32; - op->args[2] = op->args[3]; - op->args[3] = op->args[5]; - } else if (op->args[5] == TCG_COND_NE) { - /* Simplify NE comparisons where one of the pairs - can be simplified. */ - tmp = do_constant_folding_cond(INDEX_op_setcond_i32, - op->args[1], op->args[3], - TCG_COND_NE); - if (tmp == 0) { - goto do_setcond_high; - } else if (tmp == 1) { - goto do_setcond_const; - } - tmp = do_constant_folding_cond(INDEX_op_setcond_i32, - op->args[2], op->args[4], - TCG_COND_NE); - if (tmp == 0) { - goto do_setcond_low; - } else if (tmp == 1) { - goto do_setcond_const; - } - goto do_default; - } else { - goto do_default; - } + done = fold_setcond2(&ctx, op); + break; + CASE_OP_32_64(sextract): + done = fold_sextract(&ctx, op); + break; + CASE_OP_32_64_VEC(sub): + done = fold_sub(&ctx, op); + break; + CASE_OP_32_64(sub2): + done = fold_sub2(&ctx, op); + break; + CASE_OP_32_64_VEC(xor): + done = fold_xor(&ctx, op); break; - - case INDEX_op_call: - if (!(tcg_call_flags(op) - & (TCG_CALL_NO_READ_GLOBALS | TCG_CALL_NO_WRITE_GLOBALS))) { - for (i = 0; i < nb_globals; i++) { - if (test_bit(i, temps_used.l)) { - reset_ts(&s->temps[i]); - } - } - } - goto do_reset_output; - default: - do_default: - /* Default case: we know nothing about operation (or were unable - to compute the operation result) so no propagation is done. - We trash everything if the operation is the end of a basic - block, otherwise we only trash the output args. "mask" is - the non-zero bits mask for the first output arg. */ - if (def->flags & TCG_OPF_BB_END) { - memset(&temps_used, 0, sizeof(temps_used)); - } else { - do_reset_output: - for (i = 0; i < nb_oargs; i++) { - reset_temp(op->args[i]); - /* Save the corresponding known-zero bits mask for the - first output argument (only one supported so far). */ - if (i == 0) { - arg_info(op->args[i])->mask = mask; - } - } - } break; } - /* Eliminate duplicate and redundant fence instructions. */ - if (prev_mb) { - switch (opc) { - case INDEX_op_mb: - /* Merge two barriers of the same type into one, - * or a weaker barrier into a stronger one, - * or two weaker barriers into a stronger one. - * mb X; mb Y => mb X|Y - * mb; strl => mb; st - * ldaq; mb => ld; mb - * ldaq; strl => ld; mb; st - * Other combinations are also merged into a strong - * barrier. This is stricter than specified but for - * the purposes of TCG is better than not optimizing. - */ - prev_mb->args[0] |= op->args[0]; - tcg_op_remove(s, op); - break; - - default: - /* Opcodes that end the block stop the optimization. */ - if ((def->flags & TCG_OPF_BB_END) == 0) { - break; - } - /* fallthru */ - case INDEX_op_qemu_ld_i32: - case INDEX_op_qemu_ld_i64: - case INDEX_op_qemu_st_i32: - case INDEX_op_qemu_st8_i32: - case INDEX_op_qemu_st_i64: - case INDEX_op_call: - /* Opcodes that touch guest memory stop the optimization. 
*/ - prev_mb = NULL; - break; - } - } else if (opc == INDEX_op_mb) { - prev_mb = op; + if (!done) { + finish_folding(&ctx, op); } } } diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc index 5e1fac914a..3e4ca2be88 100644 --- a/tcg/ppc/tcg-target.c.inc +++ b/tcg/ppc/tcg-target.c.inc @@ -1931,7 +1931,7 @@ static void tcg_out_call(TCGContext *s, const tcg_insn_unit *target) #endif } -static const uint32_t qemu_ldx_opc[16] = { +static const uint32_t qemu_ldx_opc[(MO_SSIZE + MO_BSWAP) + 1] = { [MO_UB] = LBZX, [MO_UW] = LHZX, [MO_UL] = LWZX, @@ -1944,7 +1944,7 @@ static const uint32_t qemu_ldx_opc[16] = { [MO_BSWAP | MO_Q] = LDBRX, }; -static const uint32_t qemu_stx_opc[16] = { +static const uint32_t qemu_stx_opc[(MO_SIZE + MO_BSWAP) + 1] = { [MO_UB] = STBX, [MO_UW] = STHX, [MO_UL] = STWX, @@ -1965,7 +1965,7 @@ static const uint32_t qemu_exts_opc[4] = { /* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr, * int mmu_idx, uintptr_t ra) */ -static void * const qemu_ld_helpers[16] = { +static void * const qemu_ld_helpers[(MO_SIZE | MO_BSWAP) + 1] = { [MO_UB] = helper_ret_ldub_mmu, [MO_LEUW] = helper_le_lduw_mmu, [MO_LEUL] = helper_le_ldul_mmu, @@ -1978,7 +1978,7 @@ static void * const qemu_ld_helpers[16] = { /* helper signature: helper_st_mmu(CPUState *env, target_ulong addr, * uintxx_t val, int mmu_idx, uintptr_t ra) */ -static void * const qemu_st_helpers[16] = { +static void * const qemu_st_helpers[(MO_SIZE | MO_BSWAP) + 1] = { [MO_UB] = helper_ret_stb_mmu, [MO_LEUW] = helper_le_stw_mmu, [MO_LEUL] = helper_le_stl_mmu, @@ -2103,7 +2103,7 @@ static TCGReg tcg_out_tlb_read(TCGContext *s, MemOp opc, /* Record the context of a call to the out of line helper code for the slow path for a load or store, so that we can later generate the correct helper code. 
*/ -static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOpIdx oi, +static void add_qemu_ldst_label(TCGContext *s, bool is_ld, MemOpIdx oi, TCGReg datalo_reg, TCGReg datahi_reg, TCGReg addrlo_reg, TCGReg addrhi_reg, tcg_insn_unit *raddr, tcg_insn_unit *lptr) @@ -2122,7 +2122,7 @@ static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOpIdx oi, static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) { - TCGMemOpIdx oi = lb->oi; + MemOpIdx oi = lb->oi; MemOp opc = get_memop(oi); TCGReg hi, lo, arg = TCG_REG_R3; @@ -2169,7 +2169,7 @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) { - TCGMemOpIdx oi = lb->oi; + MemOpIdx oi = lb->oi; MemOp opc = get_memop(oi); MemOp s_bits = opc & MO_SIZE; TCGReg hi, lo, arg = TCG_REG_R3; @@ -2233,7 +2233,7 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64) { TCGReg datalo, datahi, addrlo, rbase; TCGReg addrhi __attribute__((unused)); - TCGMemOpIdx oi; + MemOpIdx oi; MemOp opc, s_bits; #ifdef CONFIG_SOFTMMU int mem_index; @@ -2308,7 +2308,7 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64) { TCGReg datalo, datahi, addrlo, rbase; TCGReg addrhi __attribute__((unused)); - TCGMemOpIdx oi; + MemOpIdx oi; MemOp opc, s_bits; #ifdef CONFIG_SOFTMMU int mem_index; diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc index dc8d8f1de2..9b13a46fb4 100644 --- a/tcg/riscv/tcg-target.c.inc +++ b/tcg/riscv/tcg-target.c.inc @@ -850,9 +850,9 @@ static void tcg_out_mb(TCGContext *s, TCGArg a0) #include "../tcg-ldst.c.inc" /* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr, - * TCGMemOpIdx oi, uintptr_t ra) + * MemOpIdx oi, uintptr_t ra) */ -static void * const qemu_ld_helpers[8] = { +static void * const qemu_ld_helpers[MO_SSIZE + 1] = { [MO_UB] = helper_ret_ldub_mmu, [MO_SB] = helper_ret_ldsb_mmu, #ifdef HOST_WORDS_BIGENDIAN @@ -875,10 +875,10 @@ static void * const qemu_ld_helpers[8] = { }; /* helper signature: helper_ret_st_mmu(CPUState *env, target_ulong addr, - * uintxx_t val, TCGMemOpIdx oi, + * uintxx_t val, MemOpIdx oi, * uintptr_t ra) */ -static void * const qemu_st_helpers[4] = { +static void * const qemu_st_helpers[MO_SIZE + 1] = { [MO_8] = helper_ret_stb_mmu, #ifdef HOST_WORDS_BIGENDIAN [MO_16] = helper_be_stw_mmu, @@ -906,7 +906,7 @@ static void tcg_out_goto(TCGContext *s, const tcg_insn_unit *target) } static void tcg_out_tlb_load(TCGContext *s, TCGReg addrl, - TCGReg addrh, TCGMemOpIdx oi, + TCGReg addrh, MemOpIdx oi, tcg_insn_unit **label_ptr, bool is_load) { MemOp opc = get_memop(oi); @@ -959,7 +959,7 @@ static void tcg_out_tlb_load(TCGContext *s, TCGReg addrl, tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP0, TCG_REG_TMP2, addrl); } -static void add_qemu_ldst_label(TCGContext *s, int is_ld, TCGMemOpIdx oi, +static void add_qemu_ldst_label(TCGContext *s, int is_ld, MemOpIdx oi, TCGType ext, TCGReg datalo, TCGReg datahi, TCGReg addrlo, TCGReg addrhi, @@ -980,7 +980,7 @@ static void add_qemu_ldst_label(TCGContext *s, int is_ld, TCGMemOpIdx oi, static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l) { - TCGMemOpIdx oi = l->oi; + MemOpIdx oi = l->oi; MemOp opc = get_memop(oi); TCGReg a0 = tcg_target_call_iarg_regs[0]; TCGReg a1 = tcg_target_call_iarg_regs[1]; @@ -1012,7 +1012,7 @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l) static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l) { 
- TCGMemOpIdx oi = l->oi; + MemOpIdx oi = l->oi; MemOp opc = get_memop(oi); MemOp s_bits = opc & MO_SIZE; TCGReg a0 = tcg_target_call_iarg_regs[0]; @@ -1104,7 +1104,7 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64) { TCGReg addr_regl, addr_regh __attribute__((unused)); TCGReg data_regl, data_regh; - TCGMemOpIdx oi; + MemOpIdx oi; MemOp opc; #if defined(CONFIG_SOFTMMU) tcg_insn_unit *label_ptr[1]; @@ -1170,7 +1170,7 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64) { TCGReg addr_regl, addr_regh __attribute__((unused)); TCGReg data_regl, data_regh; - TCGMemOpIdx oi; + MemOpIdx oi; MemOp opc; #if defined(CONFIG_SOFTMMU) tcg_insn_unit *label_ptr[1]; diff --git a/tcg/s390/tcg-target-con-set.h b/tcg/s390x/tcg-target-con-set.h similarity index 86% rename from tcg/s390/tcg-target-con-set.h rename to tcg/s390x/tcg-target-con-set.h index 31985e4903..426dd92e51 100644 --- a/tcg/s390/tcg-target-con-set.h +++ b/tcg/s390x/tcg-target-con-set.h @@ -13,13 +13,20 @@ C_O0_I1(r) C_O0_I2(L, L) C_O0_I2(r, r) C_O0_I2(r, ri) +C_O0_I2(v, r) C_O1_I1(r, L) C_O1_I1(r, r) +C_O1_I1(v, r) +C_O1_I1(v, v) +C_O1_I1(v, vr) C_O1_I2(r, 0, ri) C_O1_I2(r, 0, rI) C_O1_I2(r, 0, rJ) C_O1_I2(r, r, ri) C_O1_I2(r, rZ, r) +C_O1_I2(v, v, r) +C_O1_I2(v, v, v) +C_O1_I3(v, v, v, v) C_O1_I4(r, r, ri, r, 0) C_O1_I4(r, r, ri, rI, 0) C_O2_I2(b, a, 0, r) diff --git a/tcg/s390/tcg-target-con-str.h b/tcg/s390x/tcg-target-con-str.h similarity index 96% rename from tcg/s390/tcg-target-con-str.h rename to tcg/s390x/tcg-target-con-str.h index 892d8f8c06..8bb0358ae5 100644 --- a/tcg/s390/tcg-target-con-str.h +++ b/tcg/s390x/tcg-target-con-str.h @@ -10,6 +10,7 @@ */ REGS('r', ALL_GENERAL_REGS) REGS('L', ALL_GENERAL_REGS & ~SOFTMMU_RESERVE_REGS) +REGS('v', ALL_VECTOR_REGS) /* * A (single) even/odd pair for division. * TODO: Add something to the register allocator to allow diff --git a/tcg/s390/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc similarity index 73% rename from tcg/s390/tcg-target.c.inc rename to tcg/s390x/tcg-target.c.inc index b82cf19f09..57e803e339 100644 --- a/tcg/s390/tcg-target.c.inc +++ b/tcg/s390x/tcg-target.c.inc @@ -43,6 +43,8 @@ #define TCG_CT_CONST_ZERO 0x800 #define ALL_GENERAL_REGS MAKE_64BIT_MASK(0, 16) +#define ALL_VECTOR_REGS MAKE_64BIT_MASK(32, 32) + /* * For softmmu, we need to avoid conflicts with the first 3 * argument registers to perform the tlb lookup, and to call @@ -66,7 +68,7 @@ We don't need this when we have pc-relative loads with the general instructions extension facility. 
*/ #define TCG_REG_TB TCG_REG_R12 -#define USE_REG_TB (!(s390_facilities & FACILITY_GEN_INST_EXT)) +#define USE_REG_TB (!HAVE_FACILITY(GEN_INST_EXT)) #ifndef CONFIG_SOFTMMU #define TCG_GUEST_BASE_REG TCG_REG_R13 @@ -263,13 +265,66 @@ typedef enum S390Opcode { RX_STC = 0x42, RX_STH = 0x40, + VRIa_VGBM = 0xe744, + VRIa_VREPI = 0xe745, + VRIb_VGM = 0xe746, + VRIc_VREP = 0xe74d, + + VRRa_VLC = 0xe7de, + VRRa_VLP = 0xe7df, + VRRa_VLR = 0xe756, + VRRc_VA = 0xe7f3, + VRRc_VCEQ = 0xe7f8, /* we leave the m5 cs field 0 */ + VRRc_VCH = 0xe7fb, /* " */ + VRRc_VCHL = 0xe7f9, /* " */ + VRRc_VERLLV = 0xe773, + VRRc_VESLV = 0xe770, + VRRc_VESRAV = 0xe77a, + VRRc_VESRLV = 0xe778, + VRRc_VML = 0xe7a2, + VRRc_VMN = 0xe7fe, + VRRc_VMNL = 0xe7fc, + VRRc_VMX = 0xe7ff, + VRRc_VMXL = 0xe7fd, + VRRc_VN = 0xe768, + VRRc_VNC = 0xe769, + VRRc_VNO = 0xe76b, + VRRc_VO = 0xe76a, + VRRc_VOC = 0xe76f, + VRRc_VPKS = 0xe797, /* we leave the m5 cs field 0 */ + VRRc_VS = 0xe7f7, + VRRa_VUPH = 0xe7d7, + VRRa_VUPL = 0xe7d6, + VRRc_VX = 0xe76d, + VRRe_VSEL = 0xe78d, + VRRf_VLVGP = 0xe762, + + VRSa_VERLL = 0xe733, + VRSa_VESL = 0xe730, + VRSa_VESRA = 0xe73a, + VRSa_VESRL = 0xe738, + VRSb_VLVG = 0xe722, + VRSc_VLGV = 0xe721, + + VRX_VL = 0xe706, + VRX_VLLEZ = 0xe704, + VRX_VLREP = 0xe705, + VRX_VST = 0xe70e, + VRX_VSTEF = 0xe70b, + VRX_VSTEG = 0xe70a, + NOP = 0x0707, } S390Opcode; #ifdef CONFIG_DEBUG_TCG static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = { - "%r0", "%r1", "%r2", "%r3", "%r4", "%r5", "%r6", "%r7", - "%r8", "%r9", "%r10" "%r11" "%r12" "%r13" "%r14" "%r15" + "%r0", "%r1", "%r2", "%r3", "%r4", "%r5", "%r6", "%r7", + "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15", + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + "%v0", "%v1", "%v2", "%v3", "%v4", "%v5", "%v6", "%v7", + "%v8", "%v9", "%v10", "%v11", "%v12", "%v13", "%v14", "%v15", + "%v16", "%v17", "%v18", "%v19", "%v20", "%v21", "%v22", "%v23", + "%v24", "%v25", "%v26", "%v27", "%v28", "%v29", "%v30", "%v31", }; #endif @@ -295,6 +350,32 @@ static const int tcg_target_reg_alloc_order[] = { TCG_REG_R4, TCG_REG_R3, TCG_REG_R2, + + /* V8-V15 are call saved, and omitted. 
*/ + TCG_REG_V0, + TCG_REG_V1, + TCG_REG_V2, + TCG_REG_V3, + TCG_REG_V4, + TCG_REG_V5, + TCG_REG_V6, + TCG_REG_V7, + TCG_REG_V16, + TCG_REG_V17, + TCG_REG_V18, + TCG_REG_V19, + TCG_REG_V20, + TCG_REG_V21, + TCG_REG_V22, + TCG_REG_V23, + TCG_REG_V24, + TCG_REG_V25, + TCG_REG_V26, + TCG_REG_V27, + TCG_REG_V28, + TCG_REG_V29, + TCG_REG_V30, + TCG_REG_V31, }; static const int tcg_target_call_iarg_regs[] = { @@ -350,7 +431,7 @@ static const uint8_t tcg_cond_to_ltr_cond[] = { }; #ifdef CONFIG_SOFTMMU -static void * const qemu_ld_helpers[16] = { +static void * const qemu_ld_helpers[(MO_SSIZE | MO_BSWAP) + 1] = { [MO_UB] = helper_ret_ldub_mmu, [MO_SB] = helper_ret_ldsb_mmu, [MO_LEUW] = helper_le_lduw_mmu, @@ -365,7 +446,7 @@ static void * const qemu_ld_helpers[16] = { [MO_BEQ] = helper_be_ldq_mmu, }; -static void * const qemu_st_helpers[16] = { +static void * const qemu_st_helpers[(MO_SIZE | MO_BSWAP) + 1] = { [MO_UB] = helper_ret_stb_mmu, [MO_LEUW] = helper_le_stw_mmu, [MO_LEUL] = helper_le_stl_mmu, @@ -377,7 +458,17 @@ static void * const qemu_st_helpers[16] = { #endif static const tcg_insn_unit *tb_ret_addr; -uint64_t s390_facilities; +uint64_t s390_facilities[3]; + +static inline bool is_general_reg(TCGReg r) +{ + return r <= TCG_REG_R15; +} + +static inline bool is_vector_reg(TCGReg r) +{ + return r >= TCG_REG_V0 && r <= TCG_REG_V31; +} static bool patch_reloc(tcg_insn_unit *src_rw, int type, intptr_t value, intptr_t addend) @@ -496,6 +587,138 @@ static void tcg_out_insn_RSY(TCGContext *s, S390Opcode op, TCGReg r1, #define tcg_out_insn_RX tcg_out_insn_RS #define tcg_out_insn_RXY tcg_out_insn_RSY +static int RXB(TCGReg v1, TCGReg v2, TCGReg v3, TCGReg v4) +{ + /* + * Shift bit 4 of each regno to its corresponding bit of RXB. + * RXB itself begins at bit 8 of the instruction so 8 - 4 = 4 + * is the left-shift of the 4th operand. 
+ */ + return ((v1 & 0x10) << (4 + 3)) + | ((v2 & 0x10) << (4 + 2)) + | ((v3 & 0x10) << (4 + 1)) + | ((v4 & 0x10) << (4 + 0)); +} + +static void tcg_out_insn_VRIa(TCGContext *s, S390Opcode op, + TCGReg v1, uint16_t i2, int m3) +{ + tcg_debug_assert(is_vector_reg(v1)); + tcg_out16(s, (op & 0xff00) | ((v1 & 0xf) << 4)); + tcg_out16(s, i2); + tcg_out16(s, (op & 0x00ff) | RXB(v1, 0, 0, 0) | (m3 << 12)); +} + +static void tcg_out_insn_VRIb(TCGContext *s, S390Opcode op, + TCGReg v1, uint8_t i2, uint8_t i3, int m4) +{ + tcg_debug_assert(is_vector_reg(v1)); + tcg_out16(s, (op & 0xff00) | ((v1 & 0xf) << 4)); + tcg_out16(s, (i2 << 8) | (i3 & 0xff)); + tcg_out16(s, (op & 0x00ff) | RXB(v1, 0, 0, 0) | (m4 << 12)); +} + +static void tcg_out_insn_VRIc(TCGContext *s, S390Opcode op, + TCGReg v1, uint16_t i2, TCGReg v3, int m4) +{ + tcg_debug_assert(is_vector_reg(v1)); + tcg_debug_assert(is_vector_reg(v3)); + tcg_out16(s, (op & 0xff00) | ((v1 & 0xf) << 4) | (v3 & 0xf)); + tcg_out16(s, i2); + tcg_out16(s, (op & 0x00ff) | RXB(v1, 0, v3, 0) | (m4 << 12)); +} + +static void tcg_out_insn_VRRa(TCGContext *s, S390Opcode op, + TCGReg v1, TCGReg v2, int m3) +{ + tcg_debug_assert(is_vector_reg(v1)); + tcg_debug_assert(is_vector_reg(v2)); + tcg_out16(s, (op & 0xff00) | ((v1 & 0xf) << 4) | (v2 & 0xf)); + tcg_out32(s, (op & 0x00ff) | RXB(v1, v2, 0, 0) | (m3 << 12)); +} + +static void tcg_out_insn_VRRc(TCGContext *s, S390Opcode op, + TCGReg v1, TCGReg v2, TCGReg v3, int m4) +{ + tcg_debug_assert(is_vector_reg(v1)); + tcg_debug_assert(is_vector_reg(v2)); + tcg_debug_assert(is_vector_reg(v3)); + tcg_out16(s, (op & 0xff00) | ((v1 & 0xf) << 4) | (v2 & 0xf)); + tcg_out16(s, v3 << 12); + tcg_out16(s, (op & 0x00ff) | RXB(v1, v2, v3, 0) | (m4 << 12)); +} + +static void tcg_out_insn_VRRe(TCGContext *s, S390Opcode op, + TCGReg v1, TCGReg v2, TCGReg v3, TCGReg v4) +{ + tcg_debug_assert(is_vector_reg(v1)); + tcg_debug_assert(is_vector_reg(v2)); + tcg_debug_assert(is_vector_reg(v3)); + tcg_debug_assert(is_vector_reg(v4)); + tcg_out16(s, (op & 0xff00) | ((v1 & 0xf) << 4) | (v2 & 0xf)); + tcg_out16(s, v3 << 12); + tcg_out16(s, (op & 0x00ff) | RXB(v1, v2, v3, v4) | (v4 << 12)); +} + +static void tcg_out_insn_VRRf(TCGContext *s, S390Opcode op, + TCGReg v1, TCGReg r2, TCGReg r3) +{ + tcg_debug_assert(is_vector_reg(v1)); + tcg_debug_assert(is_general_reg(r2)); + tcg_debug_assert(is_general_reg(r3)); + tcg_out16(s, (op & 0xff00) | ((v1 & 0xf) << 4) | r2); + tcg_out16(s, r3 << 12); + tcg_out16(s, (op & 0x00ff) | RXB(v1, 0, 0, 0)); +} + +static void tcg_out_insn_VRSa(TCGContext *s, S390Opcode op, TCGReg v1, + intptr_t d2, TCGReg b2, TCGReg v3, int m4) +{ + tcg_debug_assert(is_vector_reg(v1)); + tcg_debug_assert(d2 >= 0 && d2 <= 0xfff); + tcg_debug_assert(is_general_reg(b2)); + tcg_debug_assert(is_vector_reg(v3)); + tcg_out16(s, (op & 0xff00) | ((v1 & 0xf) << 4) | (v3 & 0xf)); + tcg_out16(s, b2 << 12 | d2); + tcg_out16(s, (op & 0x00ff) | RXB(v1, 0, v3, 0) | (m4 << 12)); +} + +static void tcg_out_insn_VRSb(TCGContext *s, S390Opcode op, TCGReg v1, + intptr_t d2, TCGReg b2, TCGReg r3, int m4) +{ + tcg_debug_assert(is_vector_reg(v1)); + tcg_debug_assert(d2 >= 0 && d2 <= 0xfff); + tcg_debug_assert(is_general_reg(b2)); + tcg_debug_assert(is_general_reg(r3)); + tcg_out16(s, (op & 0xff00) | ((v1 & 0xf) << 4) | r3); + tcg_out16(s, b2 << 12 | d2); + tcg_out16(s, (op & 0x00ff) | RXB(v1, 0, 0, 0) | (m4 << 12)); +} + +static void tcg_out_insn_VRSc(TCGContext *s, S390Opcode op, TCGReg r1, + intptr_t d2, TCGReg b2, TCGReg v3, int m4) +{ + 
tcg_debug_assert(is_general_reg(r1)); + tcg_debug_assert(d2 >= 0 && d2 <= 0xfff); + tcg_debug_assert(is_general_reg(b2)); + tcg_debug_assert(is_vector_reg(v3)); + tcg_out16(s, (op & 0xff00) | (r1 << 4) | (v3 & 0xf)); + tcg_out16(s, b2 << 12 | d2); + tcg_out16(s, (op & 0x00ff) | RXB(0, 0, v3, 0) | (m4 << 12)); +} + +static void tcg_out_insn_VRX(TCGContext *s, S390Opcode op, TCGReg v1, + TCGReg b2, TCGReg x2, intptr_t d2, int m3) +{ + tcg_debug_assert(is_vector_reg(v1)); + tcg_debug_assert(d2 >= 0 && d2 <= 0xfff); + tcg_debug_assert(is_general_reg(x2)); + tcg_debug_assert(is_general_reg(b2)); + tcg_out16(s, (op & 0xff00) | ((v1 & 0xf) << 4) | x2); + tcg_out16(s, (b2 << 12) | d2); + tcg_out16(s, (op & 0x00ff) | RXB(v1, 0, 0, 0) | (m3 << 12)); +} + /* Emit an opcode with "type-checking" of the format. */ #define tcg_out_insn(S, FMT, OP, ...) \ glue(tcg_out_insn_,FMT)(S, glue(glue(FMT,_),OP), ## __VA_ARGS__) @@ -517,12 +740,38 @@ static void tcg_out_sh32(TCGContext* s, S390Opcode op, TCGReg dest, static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg dst, TCGReg src) { - if (src != dst) { - if (type == TCG_TYPE_I32) { + if (src == dst) { + return true; + } + switch (type) { + case TCG_TYPE_I32: + if (likely(is_general_reg(dst) && is_general_reg(src))) { tcg_out_insn(s, RR, LR, dst, src); - } else { - tcg_out_insn(s, RRE, LGR, dst, src); + break; } + /* fallthru */ + + case TCG_TYPE_I64: + if (likely(is_general_reg(dst))) { + if (likely(is_general_reg(src))) { + tcg_out_insn(s, RRE, LGR, dst, src); + } else { + tcg_out_insn(s, VRSc, VLGV, dst, 0, 0, src, 3); + } + break; + } else if (is_general_reg(src)) { + tcg_out_insn(s, VRSb, VLVG, dst, 0, 0, src, 3); + break; + } + /* fallthru */ + + case TCG_TYPE_V64: + case TCG_TYPE_V128: + tcg_out_insn(s, VRRa, VLR, dst, src, 0); + break; + + default: + g_assert_not_reached(); } return true; } @@ -577,7 +826,7 @@ static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret, } /* Try all 48-bit insns that can load it in one go. */ - if (s390_facilities & FACILITY_EXT_IMM) { + if (HAVE_FACILITY(EXT_IMM)) { if (sval == (int32_t)sval) { tcg_out_insn(s, RIL, LGFI, ret, sval); return; @@ -620,7 +869,7 @@ static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret, } /* Otherwise, stuff it in the constant pool. 
*/ - if (s390_facilities & FACILITY_GEN_INST_EXT) { + if (HAVE_FACILITY(GEN_INST_EXT)) { tcg_out_insn(s, RIL, LGRL, ret, 0); new_pool_label(s, sval, R_390_PC32DBL, s->code_ptr - 2, 2); } else if (USE_REG_TB && !in_prologue) { @@ -672,25 +921,92 @@ static void tcg_out_mem(TCGContext *s, S390Opcode opc_rx, S390Opcode opc_rxy, } } +static void tcg_out_vrx_mem(TCGContext *s, S390Opcode opc_vrx, + TCGReg data, TCGReg base, TCGReg index, + tcg_target_long ofs, int m3) +{ + if (ofs < 0 || ofs >= 0x1000) { + if (ofs >= -0x80000 && ofs < 0x80000) { + tcg_out_insn(s, RXY, LAY, TCG_TMP0, base, index, ofs); + base = TCG_TMP0; + index = TCG_REG_NONE; + ofs = 0; + } else { + tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, ofs); + if (index != TCG_REG_NONE) { + tcg_out_insn(s, RRE, AGR, TCG_TMP0, index); + } + index = TCG_TMP0; + ofs = 0; + } + } + tcg_out_insn_VRX(s, opc_vrx, data, base, index, ofs, m3); +} /* load data without address translation or endianness conversion */ -static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg data, - TCGReg base, intptr_t ofs) +static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg data, + TCGReg base, intptr_t ofs) { - if (type == TCG_TYPE_I32) { - tcg_out_mem(s, RX_L, RXY_LY, data, base, TCG_REG_NONE, ofs); - } else { - tcg_out_mem(s, 0, RXY_LG, data, base, TCG_REG_NONE, ofs); + switch (type) { + case TCG_TYPE_I32: + if (likely(is_general_reg(data))) { + tcg_out_mem(s, RX_L, RXY_LY, data, base, TCG_REG_NONE, ofs); + break; + } + tcg_out_vrx_mem(s, VRX_VLLEZ, data, base, TCG_REG_NONE, ofs, MO_32); + break; + + case TCG_TYPE_I64: + if (likely(is_general_reg(data))) { + tcg_out_mem(s, 0, RXY_LG, data, base, TCG_REG_NONE, ofs); + break; + } + /* fallthru */ + + case TCG_TYPE_V64: + tcg_out_vrx_mem(s, VRX_VLLEZ, data, base, TCG_REG_NONE, ofs, MO_64); + break; + + case TCG_TYPE_V128: + /* Hint quadword aligned. */ + tcg_out_vrx_mem(s, VRX_VL, data, base, TCG_REG_NONE, ofs, 4); + break; + + default: + g_assert_not_reached(); } } -static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg data, - TCGReg base, intptr_t ofs) +static void tcg_out_st(TCGContext *s, TCGType type, TCGReg data, + TCGReg base, intptr_t ofs) { - if (type == TCG_TYPE_I32) { - tcg_out_mem(s, RX_ST, RXY_STY, data, base, TCG_REG_NONE, ofs); - } else { - tcg_out_mem(s, 0, RXY_STG, data, base, TCG_REG_NONE, ofs); + switch (type) { + case TCG_TYPE_I32: + if (likely(is_general_reg(data))) { + tcg_out_mem(s, RX_ST, RXY_STY, data, base, TCG_REG_NONE, ofs); + } else { + tcg_out_vrx_mem(s, VRX_VSTEF, data, base, TCG_REG_NONE, ofs, 1); + } + break; + + case TCG_TYPE_I64: + if (likely(is_general_reg(data))) { + tcg_out_mem(s, 0, RXY_STG, data, base, TCG_REG_NONE, ofs); + break; + } + /* fallthru */ + + case TCG_TYPE_V64: + tcg_out_vrx_mem(s, VRX_VSTEG, data, base, TCG_REG_NONE, ofs, 0); + break; + + case TCG_TYPE_V128: + /* Hint quadword aligned. 
*/ + tcg_out_vrx_mem(s, VRX_VST, data, base, TCG_REG_NONE, ofs, 4); + break; + + default: + g_assert_not_reached(); } } @@ -706,7 +1022,7 @@ static void tcg_out_ld_abs(TCGContext *s, TCGType type, { intptr_t addr = (intptr_t)abs; - if ((s390_facilities & FACILITY_GEN_INST_EXT) && !(addr & 1)) { + if (HAVE_FACILITY(GEN_INST_EXT) && !(addr & 1)) { ptrdiff_t disp = tcg_pcrel_diff(s, abs) >> 1; if (disp == (int32_t)disp) { if (type == TCG_TYPE_I32) { @@ -740,7 +1056,7 @@ static inline void tcg_out_risbg(TCGContext *s, TCGReg dest, TCGReg src, static void tgen_ext8s(TCGContext *s, TCGType type, TCGReg dest, TCGReg src) { - if (s390_facilities & FACILITY_EXT_IMM) { + if (HAVE_FACILITY(EXT_IMM)) { tcg_out_insn(s, RRE, LGBR, dest, src); return; } @@ -760,7 +1076,7 @@ static void tgen_ext8s(TCGContext *s, TCGType type, TCGReg dest, TCGReg src) static void tgen_ext8u(TCGContext *s, TCGType type, TCGReg dest, TCGReg src) { - if (s390_facilities & FACILITY_EXT_IMM) { + if (HAVE_FACILITY(EXT_IMM)) { tcg_out_insn(s, RRE, LLGCR, dest, src); return; } @@ -780,7 +1096,7 @@ static void tgen_ext8u(TCGContext *s, TCGType type, TCGReg dest, TCGReg src) static void tgen_ext16s(TCGContext *s, TCGType type, TCGReg dest, TCGReg src) { - if (s390_facilities & FACILITY_EXT_IMM) { + if (HAVE_FACILITY(EXT_IMM)) { tcg_out_insn(s, RRE, LGHR, dest, src); return; } @@ -800,7 +1116,7 @@ static void tgen_ext16s(TCGContext *s, TCGType type, TCGReg dest, TCGReg src) static void tgen_ext16u(TCGContext *s, TCGType type, TCGReg dest, TCGReg src) { - if (s390_facilities & FACILITY_EXT_IMM) { + if (HAVE_FACILITY(EXT_IMM)) { tcg_out_insn(s, RRE, LLGHR, dest, src); return; } @@ -888,7 +1204,7 @@ static void tgen_andi(TCGContext *s, TCGType type, TCGReg dest, uint64_t val) tgen_ext32u(s, dest, dest); return; } - if (s390_facilities & FACILITY_EXT_IMM) { + if (HAVE_FACILITY(EXT_IMM)) { if ((val & valid) == 0xff) { tgen_ext8u(s, TCG_TYPE_I64, dest, dest); return; @@ -909,7 +1225,7 @@ static void tgen_andi(TCGContext *s, TCGType type, TCGReg dest, uint64_t val) } /* Try all 48-bit insns that can perform it in one go. */ - if (s390_facilities & FACILITY_EXT_IMM) { + if (HAVE_FACILITY(EXT_IMM)) { for (i = 0; i < 2; i++) { tcg_target_ulong mask = ~(0xffffffffull << i*32); if (((val | ~valid) & mask) == mask) { @@ -918,7 +1234,7 @@ static void tgen_andi(TCGContext *s, TCGType type, TCGReg dest, uint64_t val) } } } - if ((s390_facilities & FACILITY_GEN_INST_EXT) && risbg_mask(val)) { + if (HAVE_FACILITY(GEN_INST_EXT) && risbg_mask(val)) { tgen_andi_risbg(s, dest, dest, val); return; } @@ -967,7 +1283,7 @@ static void tgen_ori(TCGContext *s, TCGType type, TCGReg dest, uint64_t val) } /* Try all 48-bit insns that can perform it in one go. */ - if (s390_facilities & FACILITY_EXT_IMM) { + if (HAVE_FACILITY(EXT_IMM)) { for (i = 0; i < 2; i++) { tcg_target_ulong mask = (0xffffffffull << i*32); if ((val & mask) != 0 && (val & ~mask) == 0) { @@ -992,7 +1308,7 @@ static void tgen_ori(TCGContext *s, TCGType type, TCGReg dest, uint64_t val) /* Perform the OR via sequential modifications to the high and low parts. Do this via recursion to handle 16-bit vs 32-bit masks in each half. 
*/ - tcg_debug_assert(s390_facilities & FACILITY_EXT_IMM); + tcg_debug_assert(HAVE_FACILITY(EXT_IMM)); tgen_ori(s, type, dest, val & 0x00000000ffffffffull); tgen_ori(s, type, dest, val & 0xffffffff00000000ull); } @@ -1001,7 +1317,7 @@ static void tgen_ori(TCGContext *s, TCGType type, TCGReg dest, uint64_t val) static void tgen_xori(TCGContext *s, TCGType type, TCGReg dest, uint64_t val) { /* Try all 48-bit insns that can perform it in one go. */ - if (s390_facilities & FACILITY_EXT_IMM) { + if (HAVE_FACILITY(EXT_IMM)) { if ((val & 0xffffffff00000000ull) == 0) { tcg_out_insn(s, RIL, XILF, dest, val); return; @@ -1025,7 +1341,7 @@ static void tgen_xori(TCGContext *s, TCGType type, TCGReg dest, uint64_t val) tcg_tbrel_diff(s, NULL)); } else { /* Perform the xor by parts. */ - tcg_debug_assert(s390_facilities & FACILITY_EXT_IMM); + tcg_debug_assert(HAVE_FACILITY(EXT_IMM)); if (val & 0xffffffff) { tcg_out_insn(s, RIL, XILF, dest, val); } @@ -1059,7 +1375,7 @@ static int tgen_cmp(TCGContext *s, TCGType type, TCGCond c, TCGReg r1, goto exit; } - if (s390_facilities & FACILITY_EXT_IMM) { + if (HAVE_FACILITY(EXT_IMM)) { if (type == TCG_TYPE_I32) { op = (is_unsigned ? RIL_CLFI : RIL_CFI); tcg_out_insn_RIL(s, op, r1, c2); @@ -1122,7 +1438,7 @@ static void tgen_setcond(TCGContext *s, TCGType type, TCGCond cond, bool have_loc; /* With LOC2, we can always emit the minimum 3 insns. */ - if (s390_facilities & FACILITY_LOAD_ON_COND2) { + if (HAVE_FACILITY(LOAD_ON_COND2)) { /* Emit: d = 0, d = (cc ? 1 : d). */ cc = tgen_cmp(s, type, cond, c1, c2, c2const, false); tcg_out_movi(s, TCG_TYPE_I64, dest, 0); @@ -1130,7 +1446,7 @@ static void tgen_setcond(TCGContext *s, TCGType type, TCGCond cond, return; } - have_loc = (s390_facilities & FACILITY_LOAD_ON_COND) != 0; + have_loc = HAVE_FACILITY(LOAD_ON_COND); /* For HAVE_LOC, only the paths through GTU/GT/LEU/LE are smaller. */ restart: @@ -1216,7 +1532,7 @@ static void tgen_movcond(TCGContext *s, TCGType type, TCGCond c, TCGReg dest, TCGArg v3, int v3const) { int cc; - if (s390_facilities & FACILITY_LOAD_ON_COND) { + if (HAVE_FACILITY(LOAD_ON_COND)) { cc = tgen_cmp(s, type, c, c1, c2, c2const, false); if (v3const) { tcg_out_insn(s, RIE, LOCGHI, dest, v3, cc); @@ -1249,7 +1565,7 @@ static void tgen_clz(TCGContext *s, TCGReg dest, TCGReg a1, } else { tcg_out_mov(s, TCG_TYPE_I64, dest, a2); } - if (s390_facilities & FACILITY_LOAD_ON_COND) { + if (HAVE_FACILITY(LOAD_ON_COND)) { /* Emit: if (one bit found) dest = r0. */ tcg_out_insn(s, RRF, LOCGR, dest, TCG_REG_R0, 2); } else { @@ -1325,7 +1641,7 @@ static void tgen_brcond(TCGContext *s, TCGType type, TCGCond c, { int cc; - if (s390_facilities & FACILITY_GEN_INST_EXT) { + if (HAVE_FACILITY(GEN_INST_EXT)) { bool is_unsigned = is_unsigned_cond(c); bool in_range; S390Opcode opc; @@ -1519,7 +1835,7 @@ static TCGReg tcg_out_tlb_read(TCGContext *s, TCGReg addr_reg, MemOp opc, cross pages using the address of the last byte of the access. */ a_off = (a_bits >= s_bits ? 
0 : s_mask - a_mask); tlb_mask = (uint64_t)TARGET_PAGE_MASK | a_mask; - if ((s390_facilities & FACILITY_GEN_INST_EXT) && a_off == 0) { + if (HAVE_FACILITY(GEN_INST_EXT) && a_off == 0) { tgen_andi_risbg(s, TCG_REG_R3, addr_reg, tlb_mask); } else { tcg_out_insn(s, RX, LA, TCG_REG_R3, addr_reg, TCG_REG_NONE, a_off); @@ -1547,7 +1863,7 @@ static TCGReg tcg_out_tlb_read(TCGContext *s, TCGReg addr_reg, MemOp opc, return addr_reg; } -static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOpIdx oi, +static void add_qemu_ldst_label(TCGContext *s, bool is_ld, MemOpIdx oi, TCGReg data, TCGReg addr, tcg_insn_unit *raddr, tcg_insn_unit *label_ptr) { @@ -1565,7 +1881,7 @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) { TCGReg addr_reg = lb->addrlo_reg; TCGReg data_reg = lb->datalo_reg; - TCGMemOpIdx oi = lb->oi; + MemOpIdx oi = lb->oi; MemOp opc = get_memop(oi); if (!patch_reloc(lb->label_ptr[0], R_390_PC16DBL, @@ -1590,7 +1906,7 @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) { TCGReg addr_reg = lb->addrlo_reg; TCGReg data_reg = lb->datalo_reg; - TCGMemOpIdx oi = lb->oi; + MemOpIdx oi = lb->oi; MemOp opc = get_memop(oi); if (!patch_reloc(lb->label_ptr[0], R_390_PC16DBL, @@ -1644,7 +1960,7 @@ static void tcg_prepare_user_ldst(TCGContext *s, TCGReg *addr_reg, #endif /* CONFIG_SOFTMMU */ static void tcg_out_qemu_ld(TCGContext* s, TCGReg data_reg, TCGReg addr_reg, - TCGMemOpIdx oi) + MemOpIdx oi) { MemOp opc = get_memop(oi); #ifdef CONFIG_SOFTMMU @@ -1671,7 +1987,7 @@ static void tcg_out_qemu_ld(TCGContext* s, TCGReg data_reg, TCGReg addr_reg, } static void tcg_out_qemu_st(TCGContext* s, TCGReg data_reg, TCGReg addr_reg, - TCGMemOpIdx oi) + MemOpIdx oi) { MemOp opc = get_memop(oi); #ifdef CONFIG_SOFTMMU @@ -1810,7 +2126,7 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc, tcg_out_insn(s, RI, AHI, a0, a2); break; } - if (s390_facilities & FACILITY_EXT_IMM) { + if (HAVE_FACILITY(EXT_IMM)) { tcg_out_insn(s, RIL, AFI, a0, a2); break; } @@ -2056,7 +2372,7 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc, tcg_out_insn(s, RI, AGHI, a0, a2); break; } - if (s390_facilities & FACILITY_EXT_IMM) { + if (HAVE_FACILITY(EXT_IMM)) { if (a2 == (int32_t)a2) { tcg_out_insn(s, RIL, AGFI, a0, a2); break; @@ -2281,8 +2597,7 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc, /* The host memory model is quite strong, we simply need to serialize the instruction stream. */ if (args[0] & TCG_MO_ST_LD) { - tcg_out_insn(s, RR, BCR, - s390_facilities & FACILITY_FAST_BCR_SER ? 14 : 15, 0); + tcg_out_insn(s, RR, BCR, HAVE_FACILITY(FAST_BCR_SER) ? 14 : 15, 0); } break; @@ -2294,6 +2609,424 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc, } } +static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece, + TCGReg dst, TCGReg src) +{ + if (is_general_reg(src)) { + /* Replicate general register into two MO_64. */ + tcg_out_insn(s, VRRf, VLVGP, dst, src, src); + if (vece == MO_64) { + return true; + } + } + + /* + * Recall that the "standard" integer, within a vector, is the + * rightmost element of the leftmost doubleword, a-la VLLEZ. 
+ */ + tcg_out_insn(s, VRIc, VREP, dst, (8 >> vece) - 1, src, vece); + return true; +} + +static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece, + TCGReg dst, TCGReg base, intptr_t offset) +{ + tcg_out_vrx_mem(s, VRX_VLREP, dst, base, TCG_REG_NONE, offset, vece); + return true; +} + +static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece, + TCGReg dst, int64_t val) +{ + int i, mask, msb, lsb; + + /* Look for int16_t elements. */ + if (vece <= MO_16 || + (vece == MO_32 ? (int32_t)val : val) == (int16_t)val) { + tcg_out_insn(s, VRIa, VREPI, dst, val, vece); + return; + } + + /* Look for bit masks. */ + if (vece == MO_32) { + if (risbg_mask((int32_t)val)) { + /* Handle wraparound by swapping msb and lsb. */ + if ((val & 0x80000001u) == 0x80000001u) { + msb = 32 - ctz32(~val); + lsb = clz32(~val) - 1; + } else { + msb = clz32(val); + lsb = 31 - ctz32(val); + } + tcg_out_insn(s, VRIb, VGM, dst, lsb, msb, MO_32); + return; + } + } else { + if (risbg_mask(val)) { + /* Handle wraparound by swapping msb and lsb. */ + if ((val & 0x8000000000000001ull) == 0x8000000000000001ull) { + /* Handle wraparound by swapping msb and lsb. */ + msb = 64 - ctz64(~val); + lsb = clz64(~val) - 1; + } else { + msb = clz64(val); + lsb = 63 - ctz64(val); + } + tcg_out_insn(s, VRIb, VGM, dst, lsb, msb, MO_64); + return; + } + } + + /* Look for all bytes 0x00 or 0xff. */ + for (i = mask = 0; i < 8; i++) { + uint8_t byte = val >> (i * 8); + if (byte == 0xff) { + mask |= 1 << i; + } else if (byte != 0) { + break; + } + } + if (i == 8) { + tcg_out_insn(s, VRIa, VGBM, dst, mask * 0x0101, 0); + return; + } + + /* Otherwise, stuff it in the constant pool. */ + tcg_out_insn(s, RIL, LARL, TCG_TMP0, 0); + new_pool_label(s, val, R_390_PC32DBL, s->code_ptr - 2, 2); + tcg_out_insn(s, VRX, VLREP, dst, TCG_TMP0, TCG_REG_NONE, 0, MO_64); +} + +static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, + unsigned vecl, unsigned vece, + const TCGArg args[TCG_MAX_OP_ARGS], + const int const_args[TCG_MAX_OP_ARGS]) +{ + TCGType type = vecl + TCG_TYPE_V64; + TCGArg a0 = args[0], a1 = args[1], a2 = args[2]; + + switch (opc) { + case INDEX_op_ld_vec: + tcg_out_ld(s, type, a0, a1, a2); + break; + case INDEX_op_st_vec: + tcg_out_st(s, type, a0, a1, a2); + break; + case INDEX_op_dupm_vec: + tcg_out_dupm_vec(s, type, vece, a0, a1, a2); + break; + + case INDEX_op_abs_vec: + tcg_out_insn(s, VRRa, VLP, a0, a1, vece); + break; + case INDEX_op_neg_vec: + tcg_out_insn(s, VRRa, VLC, a0, a1, vece); + break; + case INDEX_op_not_vec: + tcg_out_insn(s, VRRc, VNO, a0, a1, a1, 0); + break; + + case INDEX_op_add_vec: + tcg_out_insn(s, VRRc, VA, a0, a1, a2, vece); + break; + case INDEX_op_sub_vec: + tcg_out_insn(s, VRRc, VS, a0, a1, a2, vece); + break; + case INDEX_op_and_vec: + tcg_out_insn(s, VRRc, VN, a0, a1, a2, 0); + break; + case INDEX_op_andc_vec: + tcg_out_insn(s, VRRc, VNC, a0, a1, a2, 0); + break; + case INDEX_op_mul_vec: + tcg_out_insn(s, VRRc, VML, a0, a1, a2, vece); + break; + case INDEX_op_or_vec: + tcg_out_insn(s, VRRc, VO, a0, a1, a2, 0); + break; + case INDEX_op_orc_vec: + tcg_out_insn(s, VRRc, VOC, a0, a1, a2, 0); + break; + case INDEX_op_xor_vec: + tcg_out_insn(s, VRRc, VX, a0, a1, a2, 0); + break; + + case INDEX_op_shli_vec: + tcg_out_insn(s, VRSa, VESL, a0, a2, TCG_REG_NONE, a1, vece); + break; + case INDEX_op_shri_vec: + tcg_out_insn(s, VRSa, VESRL, a0, a2, TCG_REG_NONE, a1, vece); + break; + case INDEX_op_sari_vec: + tcg_out_insn(s, VRSa, VESRA, a0, a2, TCG_REG_NONE, a1, vece); + break; + case 
INDEX_op_rotli_vec: + tcg_out_insn(s, VRSa, VERLL, a0, a2, TCG_REG_NONE, a1, vece); + break; + case INDEX_op_shls_vec: + tcg_out_insn(s, VRSa, VESL, a0, 0, a2, a1, vece); + break; + case INDEX_op_shrs_vec: + tcg_out_insn(s, VRSa, VESRL, a0, 0, a2, a1, vece); + break; + case INDEX_op_sars_vec: + tcg_out_insn(s, VRSa, VESRA, a0, 0, a2, a1, vece); + break; + case INDEX_op_rotls_vec: + tcg_out_insn(s, VRSa, VERLL, a0, 0, a2, a1, vece); + break; + case INDEX_op_shlv_vec: + tcg_out_insn(s, VRRc, VESLV, a0, a1, a2, vece); + break; + case INDEX_op_shrv_vec: + tcg_out_insn(s, VRRc, VESRLV, a0, a1, a2, vece); + break; + case INDEX_op_sarv_vec: + tcg_out_insn(s, VRRc, VESRAV, a0, a1, a2, vece); + break; + case INDEX_op_rotlv_vec: + tcg_out_insn(s, VRRc, VERLLV, a0, a1, a2, vece); + break; + + case INDEX_op_smin_vec: + tcg_out_insn(s, VRRc, VMN, a0, a1, a2, vece); + break; + case INDEX_op_smax_vec: + tcg_out_insn(s, VRRc, VMX, a0, a1, a2, vece); + break; + case INDEX_op_umin_vec: + tcg_out_insn(s, VRRc, VMNL, a0, a1, a2, vece); + break; + case INDEX_op_umax_vec: + tcg_out_insn(s, VRRc, VMXL, a0, a1, a2, vece); + break; + + case INDEX_op_bitsel_vec: + tcg_out_insn(s, VRRe, VSEL, a0, a1, a2, args[3]); + break; + + case INDEX_op_cmp_vec: + switch ((TCGCond)args[3]) { + case TCG_COND_EQ: + tcg_out_insn(s, VRRc, VCEQ, a0, a1, a2, vece); + break; + case TCG_COND_GT: + tcg_out_insn(s, VRRc, VCH, a0, a1, a2, vece); + break; + case TCG_COND_GTU: + tcg_out_insn(s, VRRc, VCHL, a0, a1, a2, vece); + break; + default: + g_assert_not_reached(); + } + break; + + case INDEX_op_s390_vuph_vec: + tcg_out_insn(s, VRRa, VUPH, a0, a1, vece); + break; + case INDEX_op_s390_vupl_vec: + tcg_out_insn(s, VRRa, VUPL, a0, a1, vece); + break; + case INDEX_op_s390_vpks_vec: + tcg_out_insn(s, VRRc, VPKS, a0, a1, a2, vece); + break; + + case INDEX_op_mov_vec: /* Always emitted via tcg_out_mov. */ + case INDEX_op_dup_vec: /* Always emitted via tcg_out_dup_vec. */ + default: + g_assert_not_reached(); + } +} + +int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece) +{ + switch (opc) { + case INDEX_op_abs_vec: + case INDEX_op_add_vec: + case INDEX_op_and_vec: + case INDEX_op_andc_vec: + case INDEX_op_bitsel_vec: + case INDEX_op_neg_vec: + case INDEX_op_not_vec: + case INDEX_op_or_vec: + case INDEX_op_orc_vec: + case INDEX_op_rotli_vec: + case INDEX_op_rotls_vec: + case INDEX_op_rotlv_vec: + case INDEX_op_sari_vec: + case INDEX_op_sars_vec: + case INDEX_op_sarv_vec: + case INDEX_op_shli_vec: + case INDEX_op_shls_vec: + case INDEX_op_shlv_vec: + case INDEX_op_shri_vec: + case INDEX_op_shrs_vec: + case INDEX_op_shrv_vec: + case INDEX_op_smax_vec: + case INDEX_op_smin_vec: + case INDEX_op_sub_vec: + case INDEX_op_umax_vec: + case INDEX_op_umin_vec: + case INDEX_op_xor_vec: + return 1; + case INDEX_op_cmp_vec: + case INDEX_op_cmpsel_vec: + case INDEX_op_rotrv_vec: + return -1; + case INDEX_op_mul_vec: + return vece < MO_64; + case INDEX_op_ssadd_vec: + case INDEX_op_sssub_vec: + return vece < MO_64 ? 
-1 : 0; + default: + return 0; + } +} + +static bool expand_vec_cmp_noinv(TCGType type, unsigned vece, TCGv_vec v0, + TCGv_vec v1, TCGv_vec v2, TCGCond cond) +{ + bool need_swap = false, need_inv = false; + + switch (cond) { + case TCG_COND_EQ: + case TCG_COND_GT: + case TCG_COND_GTU: + break; + case TCG_COND_NE: + case TCG_COND_LE: + case TCG_COND_LEU: + need_inv = true; + break; + case TCG_COND_LT: + case TCG_COND_LTU: + need_swap = true; + break; + case TCG_COND_GE: + case TCG_COND_GEU: + need_swap = need_inv = true; + break; + default: + g_assert_not_reached(); + } + + if (need_inv) { + cond = tcg_invert_cond(cond); + } + if (need_swap) { + TCGv_vec t1; + t1 = v1, v1 = v2, v2 = t1; + cond = tcg_swap_cond(cond); + } + + vec_gen_4(INDEX_op_cmp_vec, type, vece, tcgv_vec_arg(v0), + tcgv_vec_arg(v1), tcgv_vec_arg(v2), cond); + + return need_inv; +} + +static void expand_vec_cmp(TCGType type, unsigned vece, TCGv_vec v0, + TCGv_vec v1, TCGv_vec v2, TCGCond cond) +{ + if (expand_vec_cmp_noinv(type, vece, v0, v1, v2, cond)) { + tcg_gen_not_vec(vece, v0, v0); + } +} + +static void expand_vec_cmpsel(TCGType type, unsigned vece, TCGv_vec v0, + TCGv_vec c1, TCGv_vec c2, + TCGv_vec v3, TCGv_vec v4, TCGCond cond) +{ + TCGv_vec t = tcg_temp_new_vec(type); + + if (expand_vec_cmp_noinv(type, vece, t, c1, c2, cond)) { + /* Invert the sense of the compare by swapping arguments. */ + tcg_gen_bitsel_vec(vece, v0, t, v4, v3); + } else { + tcg_gen_bitsel_vec(vece, v0, t, v3, v4); + } + tcg_temp_free_vec(t); +} + +static void expand_vec_sat(TCGType type, unsigned vece, TCGv_vec v0, + TCGv_vec v1, TCGv_vec v2, TCGOpcode add_sub_opc) +{ + TCGv_vec h1 = tcg_temp_new_vec(type); + TCGv_vec h2 = tcg_temp_new_vec(type); + TCGv_vec l1 = tcg_temp_new_vec(type); + TCGv_vec l2 = tcg_temp_new_vec(type); + + tcg_debug_assert (vece < MO_64); + + /* Unpack with sign-extension. */ + vec_gen_2(INDEX_op_s390_vuph_vec, type, vece, + tcgv_vec_arg(h1), tcgv_vec_arg(v1)); + vec_gen_2(INDEX_op_s390_vuph_vec, type, vece, + tcgv_vec_arg(h2), tcgv_vec_arg(v2)); + + vec_gen_2(INDEX_op_s390_vupl_vec, type, vece, + tcgv_vec_arg(l1), tcgv_vec_arg(v1)); + vec_gen_2(INDEX_op_s390_vupl_vec, type, vece, + tcgv_vec_arg(l2), tcgv_vec_arg(v2)); + + /* Arithmetic on a wider element size. */ + vec_gen_3(add_sub_opc, type, vece + 1, tcgv_vec_arg(h1), + tcgv_vec_arg(h1), tcgv_vec_arg(h2)); + vec_gen_3(add_sub_opc, type, vece + 1, tcgv_vec_arg(l1), + tcgv_vec_arg(l1), tcgv_vec_arg(l2)); + + /* Pack with saturation. */ + vec_gen_3(INDEX_op_s390_vpks_vec, type, vece + 1, + tcgv_vec_arg(v0), tcgv_vec_arg(h1), tcgv_vec_arg(l1)); + + tcg_temp_free_vec(h1); + tcg_temp_free_vec(h2); + tcg_temp_free_vec(l1); + tcg_temp_free_vec(l2); +} + +void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece, + TCGArg a0, ...) 
+{ + va_list va; + TCGv_vec v0, v1, v2, v3, v4, t0; + + va_start(va, a0); + v0 = temp_tcgv_vec(arg_temp(a0)); + v1 = temp_tcgv_vec(arg_temp(va_arg(va, TCGArg))); + v2 = temp_tcgv_vec(arg_temp(va_arg(va, TCGArg))); + + switch (opc) { + case INDEX_op_cmp_vec: + expand_vec_cmp(type, vece, v0, v1, v2, va_arg(va, TCGArg)); + break; + + case INDEX_op_cmpsel_vec: + v3 = temp_tcgv_vec(arg_temp(va_arg(va, TCGArg))); + v4 = temp_tcgv_vec(arg_temp(va_arg(va, TCGArg))); + expand_vec_cmpsel(type, vece, v0, v1, v2, v3, v4, va_arg(va, TCGArg)); + break; + + case INDEX_op_rotrv_vec: + t0 = tcg_temp_new_vec(type); + tcg_gen_neg_vec(vece, t0, v2); + tcg_gen_rotlv_vec(vece, v0, v1, t0); + tcg_temp_free_vec(t0); + break; + + case INDEX_op_ssadd_vec: + expand_vec_sat(type, vece, v0, v1, v2, INDEX_op_add_vec); + break; + case INDEX_op_sssub_vec: + expand_vec_sat(type, vece, v0, v1, v2, INDEX_op_sub_vec); + break; + + default: + g_assert_not_reached(); + } + va_end(va); +} + static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) { switch (op) { @@ -2345,7 +3078,7 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) case INDEX_op_or_i64: case INDEX_op_xor_i32: case INDEX_op_xor_i64: - return (s390_facilities & FACILITY_DISTINCT_OPS + return (HAVE_FACILITY(DISTINCT_OPS) ? C_O1_I2(r, r, ri) : C_O1_I2(r, 0, ri)); @@ -2353,19 +3086,19 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) /* If we have the general-instruction-extensions, then we have MULTIPLY SINGLE IMMEDIATE with a signed 32-bit, otherwise we have only MULTIPLY HALFWORD IMMEDIATE, with a signed 16-bit. */ - return (s390_facilities & FACILITY_GEN_INST_EXT + return (HAVE_FACILITY(GEN_INST_EXT) ? C_O1_I2(r, 0, ri) : C_O1_I2(r, 0, rI)); case INDEX_op_mul_i64: - return (s390_facilities & FACILITY_GEN_INST_EXT + return (HAVE_FACILITY(GEN_INST_EXT) ? C_O1_I2(r, 0, rJ) : C_O1_I2(r, 0, rI)); case INDEX_op_shl_i32: case INDEX_op_shr_i32: case INDEX_op_sar_i32: - return (s390_facilities & FACILITY_DISTINCT_OPS + return (HAVE_FACILITY(DISTINCT_OPS) ? C_O1_I2(r, r, ri) : C_O1_I2(r, 0, ri)); @@ -2409,7 +3142,7 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) case INDEX_op_movcond_i32: case INDEX_op_movcond_i64: - return (s390_facilities & FACILITY_LOAD_ON_COND2 + return (HAVE_FACILITY(LOAD_ON_COND2) ? C_O1_I4(r, r, ri, rI, 0) : C_O1_I4(r, r, ri, r, 0)); @@ -2424,21 +3157,74 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) case INDEX_op_add2_i32: case INDEX_op_sub2_i32: - return (s390_facilities & FACILITY_EXT_IMM + return (HAVE_FACILITY(EXT_IMM) ? C_O2_I4(r, r, 0, 1, ri, r) : C_O2_I4(r, r, 0, 1, r, r)); case INDEX_op_add2_i64: case INDEX_op_sub2_i64: - return (s390_facilities & FACILITY_EXT_IMM + return (HAVE_FACILITY(EXT_IMM) ? 
C_O2_I4(r, r, 0, 1, rA, r) : C_O2_I4(r, r, 0, 1, r, r)); + case INDEX_op_st_vec: + return C_O0_I2(v, r); + case INDEX_op_ld_vec: + case INDEX_op_dupm_vec: + return C_O1_I1(v, r); + case INDEX_op_dup_vec: + return C_O1_I1(v, vr); + case INDEX_op_abs_vec: + case INDEX_op_neg_vec: + case INDEX_op_not_vec: + case INDEX_op_rotli_vec: + case INDEX_op_sari_vec: + case INDEX_op_shli_vec: + case INDEX_op_shri_vec: + case INDEX_op_s390_vuph_vec: + case INDEX_op_s390_vupl_vec: + return C_O1_I1(v, v); + case INDEX_op_add_vec: + case INDEX_op_sub_vec: + case INDEX_op_and_vec: + case INDEX_op_andc_vec: + case INDEX_op_or_vec: + case INDEX_op_orc_vec: + case INDEX_op_xor_vec: + case INDEX_op_cmp_vec: + case INDEX_op_mul_vec: + case INDEX_op_rotlv_vec: + case INDEX_op_rotrv_vec: + case INDEX_op_shlv_vec: + case INDEX_op_shrv_vec: + case INDEX_op_sarv_vec: + case INDEX_op_smax_vec: + case INDEX_op_smin_vec: + case INDEX_op_umax_vec: + case INDEX_op_umin_vec: + case INDEX_op_s390_vpks_vec: + return C_O1_I2(v, v, v); + case INDEX_op_rotls_vec: + case INDEX_op_shls_vec: + case INDEX_op_shrs_vec: + case INDEX_op_sars_vec: + return C_O1_I2(v, v, r); + case INDEX_op_bitsel_vec: + return C_O1_I3(v, v, v, v); + default: g_assert_not_reached(); } } +/* + * Mainline glibc added HWCAP_S390_VX before it was kernel abi. + * Some distros have fixed this up locally, others have not. + */ +#ifndef HWCAP_S390_VXRS +#define HWCAP_S390_VXRS 2048 +#endif + static void query_s390_facilities(void) { unsigned long hwcap = qemu_getauxval(AT_HWCAP); @@ -2446,13 +3232,22 @@ static void query_s390_facilities(void) /* Is STORE FACILITY LIST EXTENDED available? Honestly, I believe this is present on all 64-bit systems, but let's check for it anyway. */ if (hwcap & HWCAP_S390_STFLE) { - register int r0 __asm__("0"); - register void *r1 __asm__("1"); + register int r0 __asm__("0") = ARRAY_SIZE(s390_facilities) - 1; + register void *r1 __asm__("1") = s390_facilities; /* stfle 0(%r1) */ - r1 = &s390_facilities; asm volatile(".word 0xb2b0,0x1000" - : "=r"(r0) : "0"(0), "r"(r1) : "memory", "cc"); + : "=r"(r0) : "r"(r0), "r"(r1) : "memory", "cc"); + } + + /* + * Use of vector registers requires os support beyond the facility bit. + * If the kernel does not advertise support, disable the facility bits. + * There is nothing else we currently care about in the 3rd word, so + * disable VECTOR with one store. + */ + if (!(hwcap & HWCAP_S390_VXRS)) { + s390_facilities[2] = 0; } } @@ -2462,6 +3257,10 @@ static void tcg_target_init(TCGContext *s) tcg_target_available_regs[TCG_TYPE_I32] = 0xffff; tcg_target_available_regs[TCG_TYPE_I64] = 0xffff; + if (HAVE_FACILITY(VECTOR)) { + tcg_target_available_regs[TCG_TYPE_V64] = 0xffffffff00000000ull; + tcg_target_available_regs[TCG_TYPE_V128] = 0xffffffff00000000ull; + } tcg_target_call_clobber_regs = 0; tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R0); @@ -2476,6 +3275,31 @@ static void tcg_target_init(TCGContext *s) /* The return register can be considered call-clobbered. 
*/ tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R14); + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V0); + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V1); + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V2); + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V3); + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V4); + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V5); + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V6); + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V7); + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V16); + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V17); + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V18); + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V19); + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V20); + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V21); + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V22); + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V23); + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V24); + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V25); + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V26); + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V27); + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V28); + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V29); + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V30); + tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V31); + s->reserved_regs = 0; tcg_regset_set_reg(s->reserved_regs, TCG_TMP0); /* XXX many insns can't be used with R0, so we better avoid it for now */ diff --git a/tcg/s390/tcg-target.h b/tcg/s390x/tcg-target.h similarity index 66% rename from tcg/s390/tcg-target.h rename to tcg/s390x/tcg-target.h index 2e4ede2ea2..527ada0f63 100644 --- a/tcg/s390/tcg-target.h +++ b/tcg/s390x/tcg-target.h @@ -32,39 +32,44 @@ #define MAX_CODE_GEN_BUFFER_SIZE (3 * GiB) typedef enum TCGReg { - TCG_REG_R0 = 0, - TCG_REG_R1, - TCG_REG_R2, - TCG_REG_R3, - TCG_REG_R4, - TCG_REG_R5, - TCG_REG_R6, - TCG_REG_R7, - TCG_REG_R8, - TCG_REG_R9, - TCG_REG_R10, - TCG_REG_R11, - TCG_REG_R12, - TCG_REG_R13, - TCG_REG_R14, - TCG_REG_R15 + TCG_REG_R0, TCG_REG_R1, TCG_REG_R2, TCG_REG_R3, + TCG_REG_R4, TCG_REG_R5, TCG_REG_R6, TCG_REG_R7, + TCG_REG_R8, TCG_REG_R9, TCG_REG_R10, TCG_REG_R11, + TCG_REG_R12, TCG_REG_R13, TCG_REG_R14, TCG_REG_R15, + + TCG_REG_V0 = 32, TCG_REG_V1, TCG_REG_V2, TCG_REG_V3, + TCG_REG_V4, TCG_REG_V5, TCG_REG_V6, TCG_REG_V7, + TCG_REG_V8, TCG_REG_V9, TCG_REG_V10, TCG_REG_V11, + TCG_REG_V12, TCG_REG_V13, TCG_REG_V14, TCG_REG_V15, + TCG_REG_V16, TCG_REG_V17, TCG_REG_V18, TCG_REG_V19, + TCG_REG_V20, TCG_REG_V21, TCG_REG_V22, TCG_REG_V23, + TCG_REG_V24, TCG_REG_V25, TCG_REG_V26, TCG_REG_V27, + TCG_REG_V28, TCG_REG_V29, TCG_REG_V30, TCG_REG_V31, + + TCG_AREG0 = TCG_REG_R10, + TCG_REG_CALL_STACK = TCG_REG_R15 } TCGReg; -#define TCG_TARGET_NB_REGS 16 +#define TCG_TARGET_NB_REGS 64 /* A list of relevant facilities used by this translator. Some of these are required for proper operation, and these are checked at startup. 
*/ -#define FACILITY_ZARCH_ACTIVE (1ULL << (63 - 2)) -#define FACILITY_LONG_DISP (1ULL << (63 - 18)) -#define FACILITY_EXT_IMM (1ULL << (63 - 21)) -#define FACILITY_GEN_INST_EXT (1ULL << (63 - 34)) -#define FACILITY_LOAD_ON_COND (1ULL << (63 - 45)) +#define FACILITY_ZARCH_ACTIVE 2 +#define FACILITY_LONG_DISP 18 +#define FACILITY_EXT_IMM 21 +#define FACILITY_GEN_INST_EXT 34 +#define FACILITY_LOAD_ON_COND 45 #define FACILITY_FAST_BCR_SER FACILITY_LOAD_ON_COND #define FACILITY_DISTINCT_OPS FACILITY_LOAD_ON_COND -#define FACILITY_LOAD_ON_COND2 (1ULL << (63 - 53)) +#define FACILITY_LOAD_ON_COND2 53 +#define FACILITY_VECTOR 129 +#define FACILITY_VECTOR_ENH1 135 -extern uint64_t s390_facilities; +extern uint64_t s390_facilities[3]; + +#define HAVE_FACILITY(X) \ + ((s390_facilities[FACILITY_##X / 64] >> (63 - FACILITY_##X % 64)) & 1) /* optional instructions */ #define TCG_TARGET_HAS_div2_i32 1 @@ -85,8 +90,8 @@ extern uint64_t s390_facilities; #define TCG_TARGET_HAS_clz_i32 0 #define TCG_TARGET_HAS_ctz_i32 0 #define TCG_TARGET_HAS_ctpop_i32 0 -#define TCG_TARGET_HAS_deposit_i32 (s390_facilities & FACILITY_GEN_INST_EXT) -#define TCG_TARGET_HAS_extract_i32 (s390_facilities & FACILITY_GEN_INST_EXT) +#define TCG_TARGET_HAS_deposit_i32 HAVE_FACILITY(GEN_INST_EXT) +#define TCG_TARGET_HAS_extract_i32 HAVE_FACILITY(GEN_INST_EXT) #define TCG_TARGET_HAS_sextract_i32 0 #define TCG_TARGET_HAS_extract2_i32 0 #define TCG_TARGET_HAS_movcond_i32 1 @@ -98,7 +103,7 @@ extern uint64_t s390_facilities; #define TCG_TARGET_HAS_mulsh_i32 0 #define TCG_TARGET_HAS_extrl_i64_i32 0 #define TCG_TARGET_HAS_extrh_i64_i32 0 -#define TCG_TARGET_HAS_direct_jump (s390_facilities & FACILITY_GEN_INST_EXT) +#define TCG_TARGET_HAS_direct_jump HAVE_FACILITY(GEN_INST_EXT) #define TCG_TARGET_HAS_qemu_st8_i32 0 #define TCG_TARGET_HAS_div2_i64 1 @@ -119,11 +124,11 @@ extern uint64_t s390_facilities; #define TCG_TARGET_HAS_eqv_i64 0 #define TCG_TARGET_HAS_nand_i64 0 #define TCG_TARGET_HAS_nor_i64 0 -#define TCG_TARGET_HAS_clz_i64 (s390_facilities & FACILITY_EXT_IMM) +#define TCG_TARGET_HAS_clz_i64 HAVE_FACILITY(EXT_IMM) #define TCG_TARGET_HAS_ctz_i64 0 #define TCG_TARGET_HAS_ctpop_i64 0 -#define TCG_TARGET_HAS_deposit_i64 (s390_facilities & FACILITY_GEN_INST_EXT) -#define TCG_TARGET_HAS_extract_i64 (s390_facilities & FACILITY_GEN_INST_EXT) +#define TCG_TARGET_HAS_deposit_i64 HAVE_FACILITY(GEN_INST_EXT) +#define TCG_TARGET_HAS_extract_i64 HAVE_FACILITY(GEN_INST_EXT) #define TCG_TARGET_HAS_sextract_i64 0 #define TCG_TARGET_HAS_extract2_i64 0 #define TCG_TARGET_HAS_movcond_i64 1 @@ -134,8 +139,28 @@ extern uint64_t s390_facilities; #define TCG_TARGET_HAS_muluh_i64 0 #define TCG_TARGET_HAS_mulsh_i64 0 +#define TCG_TARGET_HAS_v64 HAVE_FACILITY(VECTOR) +#define TCG_TARGET_HAS_v128 HAVE_FACILITY(VECTOR) +#define TCG_TARGET_HAS_v256 0 + +#define TCG_TARGET_HAS_andc_vec 1 +#define TCG_TARGET_HAS_orc_vec HAVE_FACILITY(VECTOR_ENH1) +#define TCG_TARGET_HAS_not_vec 1 +#define TCG_TARGET_HAS_neg_vec 1 +#define TCG_TARGET_HAS_abs_vec 1 +#define TCG_TARGET_HAS_roti_vec 1 +#define TCG_TARGET_HAS_rots_vec 1 +#define TCG_TARGET_HAS_rotv_vec 1 +#define TCG_TARGET_HAS_shi_vec 1 +#define TCG_TARGET_HAS_shs_vec 1 +#define TCG_TARGET_HAS_shv_vec 1 +#define TCG_TARGET_HAS_mul_vec 1 +#define TCG_TARGET_HAS_sat_vec 0 +#define TCG_TARGET_HAS_minmax_vec 1 +#define TCG_TARGET_HAS_bitsel_vec 1 +#define TCG_TARGET_HAS_cmpsel_vec 0 + /* used for function call generation */ -#define TCG_REG_CALL_STACK TCG_REG_R15 #define TCG_TARGET_STACK_ALIGN 8 #define 
TCG_TARGET_CALL_STACK_OFFSET 160 @@ -144,10 +169,6 @@ extern uint64_t s390_facilities; #define TCG_TARGET_DEFAULT_MO (TCG_MO_ALL & ~TCG_MO_ST_LD) -enum { - TCG_AREG0 = TCG_REG_R10, -}; - static inline void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_rx, uintptr_t jmp_rw, uintptr_t addr) { diff --git a/tcg/s390x/tcg-target.opc.h b/tcg/s390x/tcg-target.opc.h new file mode 100644 index 0000000000..0eb2350fb3 --- /dev/null +++ b/tcg/s390x/tcg-target.opc.h @@ -0,0 +1,15 @@ +/* + * Copyright (c) 2021 Linaro + * + * This work is licensed under the terms of the GNU GPL, version 2 or + * (at your option) any later version. + * + * See the COPYING file in the top-level directory for details. + * + * Target-specific opcodes for host vector expansion. These will be + * emitted by tcg_expand_vec_op. For those familiar with GCC internals, + * consider these to be UNSPEC with names. + */ +DEF(s390_vuph_vec, 1, 1, 0, IMPLVEC) +DEF(s390_vupl_vec, 1, 1, 0, IMPLVEC) +DEF(s390_vpks_vec, 1, 2, 0, IMPLVEC) diff --git a/tcg/sparc/tcg-target.c.inc b/tcg/sparc/tcg-target.c.inc index 9720d76abd..9dd32ef95e 100644 --- a/tcg/sparc/tcg-target.c.inc +++ b/tcg/sparc/tcg-target.c.inc @@ -855,8 +855,8 @@ static void tcg_out_mb(TCGContext *s, TCGArg a0) } #ifdef CONFIG_SOFTMMU -static const tcg_insn_unit *qemu_ld_trampoline[16]; -static const tcg_insn_unit *qemu_st_trampoline[16]; +static const tcg_insn_unit *qemu_ld_trampoline[(MO_SSIZE | MO_BSWAP) + 1]; +static const tcg_insn_unit *qemu_st_trampoline[(MO_SIZE | MO_BSWAP) + 1]; static void emit_extend(TCGContext *s, TCGReg r, int op) { @@ -883,7 +883,7 @@ static void emit_extend(TCGContext *s, TCGReg r, int op) static void build_trampolines(TCGContext *s) { - static void * const qemu_ld_helpers[16] = { + static void * const qemu_ld_helpers[] = { [MO_UB] = helper_ret_ldub_mmu, [MO_SB] = helper_ret_ldsb_mmu, [MO_LEUW] = helper_le_lduw_mmu, @@ -895,7 +895,7 @@ static void build_trampolines(TCGContext *s) [MO_BEUL] = helper_be_ldul_mmu, [MO_BEQ] = helper_be_ldq_mmu, }; - static void * const qemu_st_helpers[16] = { + static void * const qemu_st_helpers[] = { [MO_UB] = helper_ret_stb_mmu, [MO_LEUW] = helper_le_stw_mmu, [MO_LEUL] = helper_le_stl_mmu, @@ -908,7 +908,7 @@ static void build_trampolines(TCGContext *s) int i; TCGReg ra; - for (i = 0; i < 16; ++i) { + for (i = 0; i < ARRAY_SIZE(qemu_ld_helpers); ++i) { if (qemu_ld_helpers[i] == NULL) { continue; } @@ -936,7 +936,7 @@ static void build_trampolines(TCGContext *s) tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O7, ra); } - for (i = 0; i < 16; ++i) { + for (i = 0; i < ARRAY_SIZE(qemu_st_helpers); ++i) { if (qemu_st_helpers[i] == NULL) { continue; } @@ -1118,7 +1118,7 @@ static TCGReg tcg_out_tlb_load(TCGContext *s, TCGReg addr, int mem_index, } #endif /* CONFIG_SOFTMMU */ -static const int qemu_ld_opc[16] = { +static const int qemu_ld_opc[(MO_SSIZE | MO_BSWAP) + 1] = { [MO_UB] = LDUB, [MO_SB] = LDSB, @@ -1135,7 +1135,7 @@ static const int qemu_ld_opc[16] = { [MO_LEQ] = LDX_LE, }; -static const int qemu_st_opc[16] = { +static const int qemu_st_opc[(MO_SIZE | MO_BSWAP) + 1] = { [MO_UB] = STB, [MO_BEUW] = STH, @@ -1148,7 +1148,7 @@ static const int qemu_st_opc[16] = { }; static void tcg_out_qemu_ld(TCGContext *s, TCGReg data, TCGReg addr, - TCGMemOpIdx oi, bool is_64) + MemOpIdx oi, bool is_64) { MemOp memop = get_memop(oi); #ifdef CONFIG_SOFTMMU @@ -1230,7 +1230,7 @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg data, TCGReg addr, } static void tcg_out_qemu_st(TCGContext *s, TCGReg data, TCGReg addr, - TCGMemOpIdx 
oi) + MemOpIdx oi) { MemOp memop = get_memop(oi); #ifdef CONFIG_SOFTMMU diff --git a/tcg/tcg-ldst.c.inc b/tcg/tcg-ldst.c.inc index c3ce88e69d..6c6848d034 100644 --- a/tcg/tcg-ldst.c.inc +++ b/tcg/tcg-ldst.c.inc @@ -22,7 +22,7 @@ typedef struct TCGLabelQemuLdst { bool is_ld; /* qemu_ld: true, qemu_st: false */ - TCGMemOpIdx oi; + MemOpIdx oi; TCGType type; /* result type of a load */ TCGReg addrlo_reg; /* reg index for low word of guest virtual addr */ TCGReg addrhi_reg; /* reg index for high word of guest virtual addr */ diff --git a/tcg/tcg-op-vec.c b/tcg/tcg-op-vec.c index 15e026ae49..faf30f9cdd 100644 --- a/tcg/tcg-op-vec.c +++ b/tcg/tcg-op-vec.c @@ -119,6 +119,18 @@ bool tcg_can_emit_vecop_list(const TCGOpcode *list, continue; } break; + case INDEX_op_usadd_vec: + if (tcg_can_emit_vec_op(INDEX_op_umin_vec, type, vece) || + tcg_can_emit_vec_op(INDEX_op_cmp_vec, type, vece)) { + continue; + } + break; + case INDEX_op_ussub_vec: + if (tcg_can_emit_vec_op(INDEX_op_umax_vec, type, vece) || + tcg_can_emit_vec_op(INDEX_op_cmp_vec, type, vece)) { + continue; + } + break; case INDEX_op_cmpsel_vec: case INDEX_op_smin_vec: case INDEX_op_smax_vec: @@ -603,7 +615,18 @@ void tcg_gen_ssadd_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b) void tcg_gen_usadd_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b) { - do_op3_nofail(vece, r, a, b, INDEX_op_usadd_vec); + if (!do_op3(vece, r, a, b, INDEX_op_usadd_vec)) { + const TCGOpcode *hold_list = tcg_swap_vecop_list(NULL); + TCGv_vec t = tcg_temp_new_vec_matching(r); + + /* usadd(a, b) = min(a, ~b) + b */ + tcg_gen_not_vec(vece, t, b); + tcg_gen_umin_vec(vece, t, t, a); + tcg_gen_add_vec(vece, r, t, b); + + tcg_temp_free_vec(t); + tcg_swap_vecop_list(hold_list); + } } void tcg_gen_sssub_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b) @@ -613,7 +636,17 @@ void tcg_gen_sssub_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b) void tcg_gen_ussub_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b) { - do_op3_nofail(vece, r, a, b, INDEX_op_ussub_vec); + if (!do_op3(vece, r, a, b, INDEX_op_ussub_vec)) { + const TCGOpcode *hold_list = tcg_swap_vecop_list(NULL); + TCGv_vec t = tcg_temp_new_vec_matching(r); + + /* ussub(a, b) = max(a, b) - b */ + tcg_gen_umax_vec(vece, t, a, b); + tcg_gen_sub_vec(vece, r, t, b); + + tcg_temp_free_vec(t); + tcg_swap_vecop_list(hold_list); + } } static void do_minmax(unsigned vece, TCGv_vec r, TCGv_vec a, diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c index 5393e4ea22..bfeb514679 100644 --- a/tcg/tcg-op.c +++ b/tcg/tcg-op.c @@ -28,7 +28,6 @@ #include "tcg/tcg-op.h" #include "tcg/tcg-mo.h" #include "trace-tcg.h" -#include "trace/mem.h" #include "exec/plugin-gen.h" /* Reduce the number of ifdefs below. This assumes that all uses of @@ -2766,7 +2765,12 @@ void tcg_gen_lookup_and_goto_ptr(void) static inline MemOp tcg_canonicalize_memop(MemOp op, bool is64, bool st) { /* Trigger the asserts within as early as possible. 
*/ - (void)get_alignment_bits(op); + unsigned a_bits = get_alignment_bits(op); + + /* Prefer MO_ALIGN+MO_XX over MO_ALIGN_XX+MO_XX */ + if (a_bits == (op & MO_SIZE)) { + op = (op & ~MO_AMASK) | MO_ALIGN; + } switch (op & MO_SIZE) { case MO_8: @@ -2780,10 +2784,13 @@ static inline MemOp tcg_canonicalize_memop(MemOp op, bool is64, bool st) } break; case MO_64: - if (!is64) { - tcg_abort(); + if (is64) { + op &= ~MO_SIGN; + break; } - break; + /* fall through */ + default: + g_assert_not_reached(); } if (st) { op &= ~MO_SIGN; @@ -2794,7 +2801,7 @@ static inline MemOp tcg_canonicalize_memop(MemOp op, bool is64, bool st) static void gen_ldst_i32(TCGOpcode opc, TCGv_i32 val, TCGv addr, MemOp memop, TCGArg idx) { - TCGMemOpIdx oi = make_memop_idx(memop, idx); + MemOpIdx oi = make_memop_idx(memop, idx); #if TARGET_LONG_BITS == 32 tcg_gen_op3i_i32(opc, val, addr, oi); #else @@ -2809,7 +2816,7 @@ static void gen_ldst_i32(TCGOpcode opc, TCGv_i32 val, TCGv addr, static void gen_ldst_i64(TCGOpcode opc, TCGv_i64 val, TCGv addr, MemOp memop, TCGArg idx) { - TCGMemOpIdx oi = make_memop_idx(memop, idx); + MemOpIdx oi = make_memop_idx(memop, idx); #if TARGET_LONG_BITS == 32 if (TCG_TARGET_REG_BITS == 32) { tcg_gen_op4i_i32(opc, TCGV_LOW(val), TCGV_HIGH(val), addr, oi); @@ -2850,10 +2857,12 @@ static inline TCGv plugin_prep_mem_callbacks(TCGv vaddr) return vaddr; } -static inline void plugin_gen_mem_callbacks(TCGv vaddr, uint16_t info) +static void plugin_gen_mem_callbacks(TCGv vaddr, MemOpIdx oi, + enum qemu_plugin_mem_rw rw) { #ifdef CONFIG_PLUGIN if (tcg_ctx->plugin_insn != NULL) { + qemu_plugin_meminfo_t info = make_plugin_meminfo(oi, rw); plugin_gen_empty_mem_callback(vaddr, info); tcg_temp_free(vaddr); } @@ -2870,11 +2879,12 @@ void libafl_gen_write(TCGv addr, MemOp ot); void tcg_gen_qemu_ld_i32(TCGv_i32 val, TCGv addr, TCGArg idx, MemOp memop) { MemOp orig_memop; - uint16_t info = trace_mem_get_info(memop, idx, 0); + MemOpIdx oi; tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD); memop = tcg_canonicalize_memop(memop, 0, 0); - trace_guest_mem_before_tcg(tcg_ctx->cpu, cpu_env, addr, info); + oi = make_memop_idx(memop, idx); + trace_guest_ld_before_tcg(tcg_ctx->cpu, cpu_env, addr, oi); orig_memop = memop; if (!TCG_TARGET_HAS_MEMORY_BSWAP && (memop & MO_BSWAP)) { @@ -2894,7 +2904,7 @@ void tcg_gen_qemu_ld_i32(TCGv_i32 val, TCGv addr, TCGArg idx, MemOp memop) //// --- End LibAFL code --- gen_ldst_i32(INDEX_op_qemu_ld_i32, val, addr, memop, idx); - plugin_gen_mem_callbacks(addr, info); + plugin_gen_mem_callbacks(addr, oi, QEMU_PLUGIN_MEM_R); if ((orig_memop ^ memop) & MO_BSWAP) { switch (orig_memop & MO_SIZE) { @@ -2915,11 +2925,12 @@ void tcg_gen_qemu_ld_i32(TCGv_i32 val, TCGv addr, TCGArg idx, MemOp memop) void tcg_gen_qemu_st_i32(TCGv_i32 val, TCGv addr, TCGArg idx, MemOp memop) { TCGv_i32 swap = NULL; - uint16_t info = trace_mem_get_info(memop, idx, 1); + MemOpIdx oi; tcg_gen_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST); memop = tcg_canonicalize_memop(memop, 0, 1); - trace_guest_mem_before_tcg(tcg_ctx->cpu, cpu_env, addr, info); + oi = make_memop_idx(memop, idx); + trace_guest_st_before_tcg(tcg_ctx->cpu, cpu_env, addr, oi); if (!TCG_TARGET_HAS_MEMORY_BSWAP && (memop & MO_BSWAP)) { swap = tcg_temp_new_i32(); @@ -2950,7 +2961,7 @@ void tcg_gen_qemu_st_i32(TCGv_i32 val, TCGv addr, TCGArg idx, MemOp memop) } else { gen_ldst_i32(INDEX_op_qemu_st_i32, val, addr, memop, idx); } - plugin_gen_mem_callbacks(addr, info); + plugin_gen_mem_callbacks(addr, oi, QEMU_PLUGIN_MEM_W); if (swap) { tcg_temp_free_i32(swap); @@ -2960,7 
+2971,7 @@ void tcg_gen_qemu_st_i32(TCGv_i32 val, TCGv addr, TCGArg idx, MemOp memop) void tcg_gen_qemu_ld_i64(TCGv_i64 val, TCGv addr, TCGArg idx, MemOp memop) { MemOp orig_memop; - uint16_t info; + MemOpIdx oi; if (TCG_TARGET_REG_BITS == 32 && (memop & MO_SIZE) < MO_64) { tcg_gen_qemu_ld_i32(TCGV_LOW(val), addr, idx, memop); @@ -2974,8 +2985,8 @@ void tcg_gen_qemu_ld_i64(TCGv_i64 val, TCGv addr, TCGArg idx, MemOp memop) tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD); memop = tcg_canonicalize_memop(memop, 1, 0); - info = trace_mem_get_info(memop, idx, 0); - trace_guest_mem_before_tcg(tcg_ctx->cpu, cpu_env, addr, info); + oi = make_memop_idx(memop, idx); + trace_guest_ld_before_tcg(tcg_ctx->cpu, cpu_env, addr, oi); orig_memop = memop; if (!TCG_TARGET_HAS_MEMORY_BSWAP && (memop & MO_BSWAP)) { @@ -2995,7 +3006,7 @@ void tcg_gen_qemu_ld_i64(TCGv_i64 val, TCGv addr, TCGArg idx, MemOp memop) //// --- End LibAFL code --- gen_ldst_i64(INDEX_op_qemu_ld_i64, val, addr, memop, idx); - plugin_gen_mem_callbacks(addr, info); + plugin_gen_mem_callbacks(addr, oi, QEMU_PLUGIN_MEM_R); if ((orig_memop ^ memop) & MO_BSWAP) { int flags = (orig_memop & MO_SIGN @@ -3020,7 +3031,7 @@ void tcg_gen_qemu_ld_i64(TCGv_i64 val, TCGv addr, TCGArg idx, MemOp memop) void tcg_gen_qemu_st_i64(TCGv_i64 val, TCGv addr, TCGArg idx, MemOp memop) { TCGv_i64 swap = NULL; - uint16_t info; + MemOpIdx oi; if (TCG_TARGET_REG_BITS == 32 && (memop & MO_SIZE) < MO_64) { tcg_gen_qemu_st_i32(TCGV_LOW(val), addr, idx, memop); @@ -3029,8 +3040,8 @@ void tcg_gen_qemu_st_i64(TCGv_i64 val, TCGv addr, TCGArg idx, MemOp memop) tcg_gen_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST); memop = tcg_canonicalize_memop(memop, 1, 1); - info = trace_mem_get_info(memop, idx, 1); - trace_guest_mem_before_tcg(tcg_ctx->cpu, cpu_env, addr, info); + oi = make_memop_idx(memop, idx); + trace_guest_st_before_tcg(tcg_ctx->cpu, cpu_env, addr, oi); if (!TCG_TARGET_HAS_MEMORY_BSWAP && (memop & MO_BSWAP)) { swap = tcg_temp_new_i64(); @@ -3060,7 +3071,7 @@ void tcg_gen_qemu_st_i64(TCGv_i64 val, TCGv addr, TCGArg idx, MemOp memop) //// --- End LibAFL code --- gen_ldst_i64(INDEX_op_qemu_st_i64, val, addr, memop, idx); - plugin_gen_mem_callbacks(addr, info); + plugin_gen_mem_callbacks(addr, oi, QEMU_PLUGIN_MEM_W); if (swap) { tcg_temp_free_i64(swap); @@ -3130,7 +3141,7 @@ typedef void (*gen_atomic_op_i64)(TCGv_i64, TCGv_env, TCGv, # define WITH_ATOMIC64(X) #endif -static void * const table_cmpxchg[16] = { +static void * const table_cmpxchg[(MO_SIZE | MO_BSWAP) + 1] = { [MO_8] = gen_helper_atomic_cmpxchgb, [MO_16 | MO_LE] = gen_helper_atomic_cmpxchgw_le, [MO_16 | MO_BE] = gen_helper_atomic_cmpxchgw_be, @@ -3164,7 +3175,7 @@ void tcg_gen_atomic_cmpxchg_i32(TCGv_i32 retv, TCGv addr, TCGv_i32 cmpv, tcg_temp_free_i32(t1); } else { gen_atomic_cx_i32 gen; - TCGMemOpIdx oi; + MemOpIdx oi; gen = table_cmpxchg[memop & (MO_SIZE | MO_BSWAP)]; tcg_debug_assert(gen != NULL); @@ -3203,7 +3214,7 @@ void tcg_gen_atomic_cmpxchg_i64(TCGv_i64 retv, TCGv addr, TCGv_i64 cmpv, } else if ((memop & MO_SIZE) == MO_64) { #ifdef CONFIG_ATOMIC64 gen_atomic_cx_i64 gen; - TCGMemOpIdx oi; + MemOpIdx oi; gen = table_cmpxchg[memop & (MO_SIZE | MO_BSWAP)]; tcg_debug_assert(gen != NULL); @@ -3259,7 +3270,7 @@ static void do_atomic_op_i32(TCGv_i32 ret, TCGv addr, TCGv_i32 val, TCGArg idx, MemOp memop, void * const table[]) { gen_atomic_op_i32 gen; - TCGMemOpIdx oi; + MemOpIdx oi; memop = tcg_canonicalize_memop(memop, 0, 0); @@ -3301,7 +3312,7 @@ static void do_atomic_op_i64(TCGv_i64 ret, TCGv addr, TCGv_i64 val, if 
((memop & MO_SIZE) == MO_64) { #ifdef CONFIG_ATOMIC64 gen_atomic_op_i64 gen; - TCGMemOpIdx oi; + MemOpIdx oi; gen = table[memop & (MO_SIZE | MO_BSWAP)]; tcg_debug_assert(gen != NULL); @@ -3332,7 +3343,7 @@ static void do_atomic_op_i64(TCGv_i64 ret, TCGv addr, TCGv_i64 val, } #define GEN_ATOMIC_HELPER(NAME, OP, NEW) \ -static void * const table_##NAME[16] = { \ +static void * const table_##NAME[(MO_SIZE | MO_BSWAP) + 1] = { \ [MO_8] = gen_helper_atomic_##NAME##b, \ [MO_16 | MO_LE] = gen_helper_atomic_##NAME##w_le, \ [MO_16 | MO_BE] = gen_helper_atomic_##NAME##w_be, \ diff --git a/tcg/tcg.c b/tcg/tcg.c index da65267cf7..022aad58c2 100644 --- a/tcg/tcg.c +++ b/tcg/tcg.c @@ -58,6 +58,7 @@ #include "elf.h" #include "exec/log.h" +#include "tcg/tcg-ldst.h" #include "tcg-internal.h" #ifdef CONFIG_TCG_INTERPRETER @@ -1517,11 +1518,11 @@ void tcg_gen_callN(void *func, TCGTemp *ret, int nargs, TCGTemp **args) if (is_32bit) { TCGv_i64 temp = tcg_temp_new_i64(); - TCGv_i64 orig = temp_tcgv_i64(args[i]); + TCGv_i32 orig = temp_tcgv_i32(args[i]); if (is_signed) { - tcg_gen_ext32s_i64(temp, orig); + tcg_gen_ext_i32_i64(temp, orig); } else { - tcg_gen_ext32u_i64(temp, orig); + tcg_gen_extu_i32_i64(temp, orig); } args[i] = tcgv_i64_temp(temp); } @@ -1920,7 +1921,7 @@ static void tcg_dump_ops(TCGContext *s, bool have_prefs) case INDEX_op_qemu_ld_i64: case INDEX_op_qemu_st_i64: { - TCGMemOpIdx oi = op->args[k++]; + MemOpIdx oi = op->args[k++]; MemOp op = get_memop(oi); unsigned ix = get_mmuidx(oi); @@ -4125,15 +4126,15 @@ static void tcg_profile_snapshot_table(TCGProfile *prof) tcg_profile_snapshot(prof, false, true); } -void tcg_dump_op_count(void) +void tcg_dump_op_count(GString *buf) { TCGProfile prof = {}; int i; tcg_profile_snapshot_table(&prof); for (i = 0; i < NB_OPS; i++) { - qemu_printf("%s %" PRId64 "\n", tcg_op_defs[i].name, - prof.table_op_count[i]); + g_string_append_printf(buf, "%s %" PRId64 "\n", tcg_op_defs[i].name, + prof.table_op_count[i]); } } @@ -4152,9 +4153,9 @@ int64_t tcg_cpu_exec_time(void) return ret; } #else -void tcg_dump_op_count(void) +void tcg_dump_op_count(GString *buf) { - qemu_printf("[TCG profiler not compiled]\n"); + g_string_append_printf(buf, "[TCG profiler not compiled]\n"); } int64_t tcg_cpu_exec_time(void) @@ -4392,7 +4393,7 @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb) } #ifdef CONFIG_PROFILER -void tcg_dump_info(void) +void tcg_dump_info(GString *buf) { TCGProfile prof = {}; const TCGProfile *s; @@ -4406,53 +4407,59 @@ void tcg_dump_info(void) tb_div_count = tb_count ? tb_count : 1; tot = s->interm_time + s->code_time; - qemu_printf("JIT cycles %" PRId64 " (%0.3f s at 2.4 GHz)\n", - tot, tot / 2.4e9); - qemu_printf("translated TBs %" PRId64 " (aborted=%" PRId64 - " %0.1f%%)\n", - tb_count, s->tb_count1 - tb_count, - (double)(s->tb_count1 - s->tb_count) - / (s->tb_count1 ? 
s->tb_count1 : 1) * 100.0); - qemu_printf("avg ops/TB %0.1f max=%d\n", - (double)s->op_count / tb_div_count, s->op_count_max); - qemu_printf("deleted ops/TB %0.2f\n", - (double)s->del_op_count / tb_div_count); - qemu_printf("avg temps/TB %0.2f max=%d\n", - (double)s->temp_count / tb_div_count, s->temp_count_max); - qemu_printf("avg host code/TB %0.1f\n", - (double)s->code_out_len / tb_div_count); - qemu_printf("avg search data/TB %0.1f\n", - (double)s->search_out_len / tb_div_count); + g_string_append_printf(buf, "JIT cycles %" PRId64 + " (%0.3f s at 2.4 GHz)\n", + tot, tot / 2.4e9); + g_string_append_printf(buf, "translated TBs %" PRId64 + " (aborted=%" PRId64 " %0.1f%%)\n", + tb_count, s->tb_count1 - tb_count, + (double)(s->tb_count1 - s->tb_count) + / (s->tb_count1 ? s->tb_count1 : 1) * 100.0); + g_string_append_printf(buf, "avg ops/TB %0.1f max=%d\n", + (double)s->op_count / tb_div_count, s->op_count_max); + g_string_append_printf(buf, "deleted ops/TB %0.2f\n", + (double)s->del_op_count / tb_div_count); + g_string_append_printf(buf, "avg temps/TB %0.2f max=%d\n", + (double)s->temp_count / tb_div_count, + s->temp_count_max); + g_string_append_printf(buf, "avg host code/TB %0.1f\n", + (double)s->code_out_len / tb_div_count); + g_string_append_printf(buf, "avg search data/TB %0.1f\n", + (double)s->search_out_len / tb_div_count); - qemu_printf("cycles/op %0.1f\n", - s->op_count ? (double)tot / s->op_count : 0); - qemu_printf("cycles/in byte %0.1f\n", - s->code_in_len ? (double)tot / s->code_in_len : 0); - qemu_printf("cycles/out byte %0.1f\n", - s->code_out_len ? (double)tot / s->code_out_len : 0); - qemu_printf("cycles/search byte %0.1f\n", - s->search_out_len ? (double)tot / s->search_out_len : 0); + g_string_append_printf(buf, "cycles/op %0.1f\n", + s->op_count ? (double)tot / s->op_count : 0); + g_string_append_printf(buf, "cycles/in byte %0.1f\n", + s->code_in_len ? (double)tot / s->code_in_len : 0); + g_string_append_printf(buf, "cycles/out byte %0.1f\n", + s->code_out_len ? (double)tot / s->code_out_len : 0); + g_string_append_printf(buf, "cycles/search byte %0.1f\n", + s->search_out_len ? + (double)tot / s->search_out_len : 0); if (tot == 0) { tot = 1; } - qemu_printf(" gen_interm time %0.1f%%\n", - (double)s->interm_time / tot * 100.0); - qemu_printf(" gen_code time %0.1f%%\n", - (double)s->code_time / tot * 100.0); - qemu_printf("optim./code time %0.1f%%\n", - (double)s->opt_time / (s->code_time ? s->code_time : 1) - * 100.0); - qemu_printf("liveness/code time %0.1f%%\n", - (double)s->la_time / (s->code_time ? s->code_time : 1) * 100.0); - qemu_printf("cpu_restore count %" PRId64 "\n", - s->restore_count); - qemu_printf(" avg cycles %0.1f\n", - s->restore_count ? (double)s->restore_time / s->restore_count : 0); + g_string_append_printf(buf, " gen_interm time %0.1f%%\n", + (double)s->interm_time / tot * 100.0); + g_string_append_printf(buf, " gen_code time %0.1f%%\n", + (double)s->code_time / tot * 100.0); + g_string_append_printf(buf, "optim./code time %0.1f%%\n", + (double)s->opt_time / (s->code_time ? + s->code_time : 1) + * 100.0); + g_string_append_printf(buf, "liveness/code time %0.1f%%\n", + (double)s->la_time / (s->code_time ? + s->code_time : 1) * 100.0); + g_string_append_printf(buf, "cpu_restore count %" PRId64 "\n", + s->restore_count); + g_string_append_printf(buf, " avg cycles %0.1f\n", + s->restore_count ? 
+ (double)s->restore_time / s->restore_count : 0); } #else -void tcg_dump_info(void) +void tcg_dump_info(GString *buf) { - qemu_printf("[TCG profiler not compiled]\n"); + g_string_append_printf(buf, "[TCG profiler not compiled]\n"); } #endif diff --git a/tcg/tci.c b/tcg/tci.c index b672c7cae5..e76087ccac 100644 --- a/tcg/tci.c +++ b/tcg/tci.c @@ -22,6 +22,7 @@ #include "tcg/tcg.h" /* MAX_OPC_PARAM_IARGS */ #include "exec/cpu_ldst.h" #include "tcg/tcg-op.h" +#include "tcg/tcg-ldst.h" #include "qemu/compiler.h" #include @@ -61,7 +62,7 @@ static uint64_t tci_uint64(uint32_t high, uint32_t low) * i = immediate (uint32_t) * I = immediate (tcg_target_ulong) * l = label or pointer - * m = immediate (TCGMemOpIdx) + * m = immediate (MemOpIdx) * n = immediate (call return length) * r = register * s = signed ldst offset @@ -105,7 +106,7 @@ static void tci_args_ri(uint32_t insn, TCGReg *r0, tcg_target_ulong *i1) } static void tci_args_rrm(uint32_t insn, TCGReg *r0, - TCGReg *r1, TCGMemOpIdx *m2) + TCGReg *r1, MemOpIdx *m2) { *r0 = extract32(insn, 8, 4); *r1 = extract32(insn, 12, 4); @@ -145,7 +146,7 @@ static void tci_args_rrrc(uint32_t insn, } static void tci_args_rrrm(uint32_t insn, - TCGReg *r0, TCGReg *r1, TCGReg *r2, TCGMemOpIdx *m3) + TCGReg *r0, TCGReg *r1, TCGReg *r2, MemOpIdx *m3) { *r0 = extract32(insn, 8, 4); *r1 = extract32(insn, 12, 4); @@ -289,7 +290,7 @@ static bool tci_compare64(uint64_t u0, uint64_t u1, TCGCond condition) } static uint64_t tci_qemu_ld(CPUArchState *env, target_ulong taddr, - TCGMemOpIdx oi, const void *tb_ptr) + MemOpIdx oi, const void *tb_ptr) { MemOp mop = get_memop(oi) & (MO_BSWAP | MO_SSIZE); uintptr_t ra = (uintptr_t)tb_ptr; @@ -374,7 +375,7 @@ static uint64_t tci_qemu_ld(CPUArchState *env, target_ulong taddr, } static void tci_qemu_st(CPUArchState *env, target_ulong taddr, uint64_t val, - TCGMemOpIdx oi, const void *tb_ptr) + MemOpIdx oi, const void *tb_ptr) { MemOp mop = get_memop(oi) & (MO_BSWAP | MO_SSIZE); uintptr_t ra = (uintptr_t)tb_ptr; @@ -482,7 +483,7 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env, uint32_t tmp32; uint64_t tmp64; uint64_t T1, T2; - TCGMemOpIdx oi; + MemOpIdx oi; int32_t ofs; void *ptr; @@ -1148,7 +1149,7 @@ int print_insn_tci(bfd_vma addr, disassemble_info *info) tcg_target_ulong i1; int32_t s2; TCGCond c; - TCGMemOpIdx oi; + MemOpIdx oi; uint8_t pos, len; void *ptr; diff --git a/tests/Makefile.include b/tests/Makefile.include index 7426522bbe..4c564cf789 100644 --- a/tests/Makefile.include +++ b/tests/Makefile.include @@ -16,7 +16,7 @@ ifneq ($(filter $(all-check-targets), check-softfloat),) @echo " $(MAKE) check-tcg Run TCG tests" @echo " $(MAKE) check-softfloat Run FPU emulation tests" endif - @echo " $(MAKE) check-acceptance Run acceptance (functional) tests for currently configured targets" + @echo " $(MAKE) check-avocado Run avocado (integration) tests for currently configured targets" @echo @echo " $(MAKE) check-report.tap Generates an aggregated TAP test report" @echo " $(MAKE) check-venv Creates a Python venv for tests" @@ -24,7 +24,7 @@ endif @echo @echo "The following are useful for CI builds" @echo " $(MAKE) check-build Build most test binaris" - @echo " $(MAKE) get-vm-images Downloads all images used by acceptance tests, according to configured targets (~350 MB each, 1.5 GB max)" + @echo " $(MAKE) get-vm-images Downloads all images used by avocado tests, according to configured targets (~350 MB each, 1.5 GB max)" @echo @echo @echo "The variable SPEED can be set to control the gtester speed setting." 
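For context on the image download that the help text above advertises, the get-vm-image rules in the following hunk drive Avocado's vmimage utility. The Python sketch below is only an approximation of that fetch under stated assumptions: the helper name, arch value and cache directory are illustrative, not taken from this patch, and the exact vmimage API details should be checked against the installed Avocado version.

    # Illustrative sketch only: roughly what "make get-vm-image-fedora-31-x86_64"
    # asks the Avocado vmimage plugin to do; argument values are assumptions.
    from avocado.utils import vmimage

    def prefetch_fedora_31(arch="x86_64", cache_dir="/tmp/avocado-cache"):
        # Download (or reuse a cached copy of) the Fedora 31 cloud image and
        # return a local path that tests can boot from.
        image = vmimage.get(name="fedora", version="31", arch=arch,
                            cache_dir=cache_dir)
        return image.path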
@@ -83,13 +83,13 @@ clean-tcg: $(CLEAN_TCG_TARGET_RULES) # Python venv for running tests -.PHONY: check-venv check-acceptance +.PHONY: check-venv check-avocado check-acceptance check-acceptance-deprecated-warning TESTS_VENV_DIR=$(BUILD_DIR)/tests/venv TESTS_VENV_REQ=$(SRC_PATH)/tests/requirements.txt TESTS_RESULTS_DIR=$(BUILD_DIR)/tests/results ifndef AVOCADO_TESTS - AVOCADO_TESTS=tests/acceptance + AVOCADO_TESTS=tests/avocado endif # Controls the output generated by Avocado when running tests. # Any number of command separated loggers are accepted. For more @@ -127,12 +127,12 @@ get-vm-image-fedora-31-%: check-venv $(call quiet-command, \ $(TESTS_VENV_DIR)/bin/python -m avocado vmimage get \ --distro=fedora --distro-version=31 --arch=$*, \ - "AVOCADO", "Downloading acceptance tests VM image for $*") + "AVOCADO", "Downloading avocado tests VM image for $*") # download all vm images, according to defined targets get-vm-images: check-venv $(patsubst %,get-vm-image-fedora-31-%, $(FEDORA_31_DOWNLOAD)) -check-acceptance: check-venv $(TESTS_RESULTS_DIR) get-vm-images +check-avocado: check-venv $(TESTS_RESULTS_DIR) get-vm-images $(call quiet-command, \ $(TESTS_VENV_DIR)/bin/python -m avocado \ --show=$(AVOCADO_SHOW) run --job-results-dir=$(TESTS_RESULTS_DIR) \ @@ -140,7 +140,14 @@ check-acceptance: check-venv $(TESTS_RESULTS_DIR) get-vm-images --filter-by-tags-include-empty-key) \ $(AVOCADO_CMDLINE_TAGS) \ $(if $(GITLAB_CI),,--failfast) $(AVOCADO_TESTS), \ - "AVOCADO", "tests/acceptance") + "AVOCADO", "tests/avocado") + +check-acceptance-deprecated-warning: + @echo + @echo "Note '$(MAKE) check-acceptance' is deprecated, use '$(MAKE) check-avocado' instead." + @echo + +check-acceptance: check-acceptance-deprecated-warning | check-avocado # Consolidated targets @@ -148,16 +155,23 @@ check-acceptance: check-venv $(TESTS_RESULTS_DIR) get-vm-images check: ifeq ($(CONFIG_TOOLS)$(CONFIG_POSIX),yy) -QEMU_IOTESTS_HELPERS-$(CONFIG_LINUX) = tests/qemu-iotests/socket_scm_helper$(EXESUF) check: check-block export PYTHON -check-block: $(SRC_PATH)/tests/check-block.sh qemu-img$(EXESUF) \ - qemu-io$(EXESUF) qemu-nbd$(EXESUF) $(QEMU_IOTESTS_HELPERS-y) \ - $(filter qemu-system-%, $(ninja-targets)) + +ifneq ($(filter check check-block check-build, $(MAKECMDGOALS)),) +ninja-cmd-goals += \ + qemu-img$(EXESUF) \ + qemu-io$(EXESUF) \ + qemu-nbd$(EXESUF) \ + storage-daemon/qemu-storage-daemon$(EXESUF) \ + $(filter qemu-system-%, $(ninja-targets)) +endif + +check-block: $(SRC_PATH)/tests/check-block.sh run-ninja @$< endif -check-build: $(QEMU_IOTESTS_HELPERS-y) +check-build: run-ninja check-clean: rm -rf $(TESTS_VENV_DIR) $(TESTS_RESULTS_DIR) diff --git a/tests/acceptance/README.rst b/tests/acceptance/README.rst deleted file mode 100644 index 89260faed6..0000000000 --- a/tests/acceptance/README.rst +++ /dev/null @@ -1,10 +0,0 @@ -============================================ -Acceptance tests using the Avocado Framework -============================================ - -This directory contains functional tests, also known as acceptance -level tests. They're usually higher level, and may interact with -external resources and with various guest operating systems. - -For more information, please refer to ``docs/devel/testing.rst``, -section "Acceptance tests using the Avocado Framework". 
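The README removed above describes these as higher-level functional tests; its replacement below calls them integration tests. Concretely, a test under tests/avocado/ is an Avocado test class, and the sketch that follows shows the general shape using the QemuSystemTest base class this patch introduces further down. It is a minimal sketch, not a test from the tree: the asset URL, the missing hash and the console string are placeholders.

    # Hypothetical minimal integration test for tests/avocado/; URL, hash and
    # console message below are placeholders, not values from this patch.
    from avocado_qemu import QemuSystemTest
    from avocado_qemu import wait_for_console_pattern

    class ExampleBoot(QemuSystemTest):
        """
        :avocado: tags=arch:x86_64
        :avocado: tags=machine:pc
        :avocado: tags=quick
        """
        def test_kernel_boot(self):
            # fetch_asset() caches the download between runs (placeholder asset).
            kernel = self.fetch_asset('https://example.org/path/to/vmlinuz',
                                      asset_hash=None)
            self.vm.set_console()
            self.vm.add_args('-kernel', kernel,
                             '-append', 'printk.time=0 console=ttyS0')
            self.vm.launch()
            wait_for_console_pattern(self, 'Linux version')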
diff --git a/tests/avocado/README.rst b/tests/avocado/README.rst new file mode 100644 index 0000000000..94488371bb --- /dev/null +++ b/tests/avocado/README.rst @@ -0,0 +1,10 @@ +============================================= +Integration tests using the Avocado Framework +============================================= + +This directory contains integration tests. They're usually higher +level, and may interact with external resources and with various +guest operating systems. + +For more information, please refer to ``docs/devel/testing.rst``, +section "Integration tests using the Avocado Framework". diff --git a/tests/acceptance/avocado_qemu/__init__.py b/tests/avocado/avocado_qemu/__init__.py similarity index 87% rename from tests/acceptance/avocado_qemu/__init__.py rename to tests/avocado/avocado_qemu/__init__.py index 1841053e2c..75063c0c30 100644 --- a/tests/acceptance/avocado_qemu/__init__.py +++ b/tests/avocado/avocado_qemu/__init__.py @@ -11,13 +11,14 @@ import logging import os import shutil +import subprocess import sys import tempfile import time import uuid import avocado -from avocado.utils import cloudinit, datadrainer, network, ssh, vmimage +from avocado.utils import cloudinit, datadrainer, network, process, ssh, vmimage from avocado.utils.path import find_command #: The QEMU build root directory. It may also be the source directory @@ -27,7 +28,7 @@ from avocado.utils.path import find_command BUILD_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) if os.path.islink(os.path.dirname(os.path.dirname(__file__))): - # The link to the acceptance tests dir in the source code directory + # The link to the avocado tests dir in the source code directory lnk = os.path.dirname(os.path.dirname(__file__)) #: The QEMU root source directory SOURCE_DIR = os.path.dirname(os.path.dirname(os.readlink(lnk))) @@ -41,11 +42,67 @@ from qemu.utils import (get_info_usernet_hostfwd_port, kvm_available, tcg_available) +def has_cmd(name, args=None): + """ + This function is for use in a @avocado.skipUnless decorator, e.g.: + + @skipUnless(*has_cmd('sudo -n', ('sudo', '-n', 'true'))) + def test_something_that_needs_sudo(self): + ... + """ + + if args is None: + args = ('which', name) + + try: + _, stderr, exitcode = run_cmd(args) + except Exception as e: + exitcode = -1 + stderr = str(e) + + if exitcode != 0: + cmd_line = ' '.join(args) + err = f'{name} required, but "{cmd_line}" failed: {stderr.strip()}' + return (False, err) + else: + return (True, '') + +def has_cmds(*cmds): + """ + This function is for use in a @avocado.skipUnless decorator and + allows checking for the availability of multiple commands, e.g.: + + @skipUnless(*has_cmds(('cmd1', ('cmd1', '--some-parameter')), + 'cmd2', 'cmd3')) + def test_something_that_needs_cmd1_and_cmd2(self): + ... + """ + + for cmd in cmds: + if isinstance(cmd, str): + cmd = (cmd,) + + ok, errstr = has_cmd(*cmd) + if not ok: + return (False, errstr) + + return (True, '') + +def run_cmd(args): + subp = subprocess.Popen(args, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + universal_newlines=True) + stdout, stderr = subp.communicate() + ret = subp.returncode + + return (stdout, stderr, ret) + def is_readable_executable_file(path): return os.path.isfile(path) and os.access(path, os.R_OK | os.X_OK) -def pick_default_qemu_bin(arch=None): +def pick_default_qemu_bin(bin_prefix='qemu-system-', arch=None): """ Picks the path of a QEMU binary, starting either in the current working directory or in the source tree root directory. 
@@ -64,7 +121,7 @@ def pick_default_qemu_bin(arch=None): # qemu binary path does not match arch for powerpc, handle it if 'ppc64le' in arch: arch = 'ppc64' - qemu_bin_relative_path = "./qemu-system-%s" % arch + qemu_bin_relative_path = os.path.join(".", bin_prefix + arch) if is_readable_executable_file(qemu_bin_relative_path): return qemu_bin_relative_path @@ -119,7 +176,7 @@ def interrupt_interactive_console_until_pattern(test, success_message, :param test: an Avocado test containing a VM that will have its console read and probed for a success or failure message - :type test: :class:`avocado_qemu.Test` + :type test: :class:`avocado_qemu.QemuSystemTest` :param success_message: if this message appears, test succeeds :param failure_message: if this message appears, test fails :param interrupt_string: a string to send to the console before trying @@ -135,7 +192,7 @@ def wait_for_console_pattern(test, success_message, failure_message=None, :param test: an Avocado test containing a VM that will have its console read and probed for a success or failure message - :type test: :class:`avocado_qemu.Test` + :type test: :class:`avocado_qemu.QemuSystemTest` :param success_message: if this message appears, test succeeds :param failure_message: if this message appears, test fails """ @@ -147,7 +204,7 @@ def exec_command(test, command): the content. :param test: an Avocado test containing a VM. - :type test: :class:`avocado_qemu.Test` + :type test: :class:`avocado_qemu.QemuSystemTest` :param command: the command to send :type command: str """ @@ -162,14 +219,14 @@ def exec_command_and_wait_for_pattern(test, command, :param test: an Avocado test containing a VM that will have its console read and probed for a success or failure message - :type test: :class:`avocado_qemu.Test` + :type test: :class:`avocado_qemu.QemuSystemTest` :param command: the command to send :param success_message: if this message appears, test succeeds :param failure_message: if this message appears, test fails """ _console_interaction(test, success_message, failure_message, command + '\r') -class Test(avocado.Test): +class QemuBaseTest(avocado.Test): def _get_unique_tag_val(self, tag_name): """ Gets a tag value, if unique for a key @@ -179,6 +236,43 @@ class Test(avocado.Test): return vals.pop() return None + def setUp(self, bin_prefix): + self.arch = self.params.get('arch', + default=self._get_unique_tag_val('arch')) + + self.cpu = self.params.get('cpu', + default=self._get_unique_tag_val('cpu')) + + default_qemu_bin = pick_default_qemu_bin(bin_prefix, arch=self.arch) + self.qemu_bin = self.params.get('qemu_bin', + default=default_qemu_bin) + if self.qemu_bin is None: + self.cancel("No QEMU binary defined or found in the build tree") + + def fetch_asset(self, name, + asset_hash=None, algorithm=None, + locations=None, expire=None, + find_only=False, cancel_on_missing=True): + return super().fetch_asset(name, + asset_hash=asset_hash, + algorithm=algorithm, + locations=locations, + expire=expire, + find_only=find_only, + cancel_on_missing=cancel_on_missing) + + +class QemuSystemTest(QemuBaseTest): + """Facilitates system emulation tests.""" + + def setUp(self): + self._vms = {} + + super().setUp('qemu-system-') + + self.machine = self.params.get('machine', + default=self._get_unique_tag_val('machine')) + def require_accelerator(self, accelerator): """ Requires an accelerator to be available for the test to continue @@ -201,24 +295,6 @@ class Test(avocado.Test): self.cancel("%s accelerator does not seem to be " "available" % 
accelerator) - def setUp(self): - self._vms = {} - - self.arch = self.params.get('arch', - default=self._get_unique_tag_val('arch')) - - self.cpu = self.params.get('cpu', - default=self._get_unique_tag_val('cpu')) - - self.machine = self.params.get('machine', - default=self._get_unique_tag_val('machine')) - - default_qemu_bin = pick_default_qemu_bin(arch=self.arch) - self.qemu_bin = self.params.get('qemu_bin', - default=default_qemu_bin) - if self.qemu_bin is None: - self.cancel("No QEMU binary defined or found in the build tree") - def _new_vm(self, name, *args): self._sd = tempfile.TemporaryDirectory(prefix="avo_qemu_sock_") vm = QEMUMachine(self.qemu_bin, base_temp_dir=self.workdir, @@ -272,17 +348,22 @@ class Test(avocado.Test): self._sd = None super().tearDown() - def fetch_asset(self, name, - asset_hash=None, algorithm=None, - locations=None, expire=None, - find_only=False, cancel_on_missing=True): - return super().fetch_asset(name, - asset_hash=asset_hash, - algorithm=algorithm, - locations=locations, - expire=expire, - find_only=find_only, - cancel_on_missing=cancel_on_missing) + +class QemuUserTest(QemuBaseTest): + """Facilitates user-mode emulation tests.""" + + def setUp(self): + self._ldpath = [] + super().setUp('qemu-') + + def add_ldpath(self, ldpath): + self._ldpath.append(os.path.abspath(ldpath)) + + def run(self, bin_path, args=[]): + qemu_args = " ".join(["-L %s" % ldpath for ldpath in self._ldpath]) + bin_args = " ".join(args) + return process.run("%s %s %s %s" % (self.qemu_bin, qemu_args, + bin_path, bin_args)) class LinuxSSHMixIn: @@ -424,11 +505,11 @@ class LinuxDistro: return self._info.get('kernel_params', None) -class LinuxTest(LinuxSSHMixIn, Test): +class LinuxTest(LinuxSSHMixIn, QemuSystemTest): """Facilitates having a cloud-image Linux based available. - For tests that indend to interact with guests, this is a better choice - to start with than the more vanilla `Test` class. + For tests that indent to interact with guests, this is a better choice + to start with than the more vanilla `QemuSystemTest` class. 
""" timeout = 900 diff --git a/tests/acceptance/boot_linux.py b/tests/avocado/boot_linux.py similarity index 100% rename from tests/acceptance/boot_linux.py rename to tests/avocado/boot_linux.py diff --git a/tests/acceptance/boot_linux_console.py b/tests/avocado/boot_linux_console.py similarity index 99% rename from tests/acceptance/boot_linux_console.py rename to tests/avocado/boot_linux_console.py index 06fc967f6c..9c618d4809 100644 --- a/tests/acceptance/boot_linux_console.py +++ b/tests/avocado/boot_linux_console.py @@ -15,20 +15,13 @@ import shutil from avocado import skip from avocado import skipUnless -from avocado_qemu import Test +from avocado_qemu import QemuSystemTest from avocado_qemu import exec_command from avocado_qemu import exec_command_and_wait_for_pattern from avocado_qemu import interrupt_interactive_console_until_pattern from avocado_qemu import wait_for_console_pattern from avocado.utils import process from avocado.utils import archive -from avocado.utils.path import find_command, CmdNotFoundError - -P7ZIP_AVAILABLE = True -try: - find_command('7z') -except CmdNotFoundError: - P7ZIP_AVAILABLE = False """ Round up to next power of 2 @@ -46,7 +39,7 @@ def image_pow2ceil_expand(path): with open(path, 'ab+') as fd: fd.truncate(size_aligned) -class LinuxKernelTest(Test): +class LinuxKernelTest(QemuSystemTest): KERNEL_COMMON_COMMAND_LINE = 'printk.time=0 ' def wait_for_console_pattern(self, success_message, vm=None): diff --git a/tests/acceptance/boot_xen.py b/tests/avocado/boot_xen.py similarity index 100% rename from tests/acceptance/boot_xen.py rename to tests/avocado/boot_xen.py diff --git a/tests/acceptance/cpu_queries.py b/tests/avocado/cpu_queries.py similarity index 91% rename from tests/acceptance/cpu_queries.py rename to tests/avocado/cpu_queries.py index cc9e380cc7..cf69f69b11 100644 --- a/tests/acceptance/cpu_queries.py +++ b/tests/avocado/cpu_queries.py @@ -8,9 +8,9 @@ # This work is licensed under the terms of the GNU GPL, version 2 or # later. See the COPYING file in the top-level directory. -from avocado_qemu import Test +from avocado_qemu import QemuSystemTest -class QueryCPUModelExpansion(Test): +class QueryCPUModelExpansion(QemuSystemTest): """ Run query-cpu-model-expansion for each CPU model, and validate results """ diff --git a/tests/acceptance/empty_cpu_model.py b/tests/avocado/empty_cpu_model.py similarity index 88% rename from tests/acceptance/empty_cpu_model.py rename to tests/avocado/empty_cpu_model.py index a1e59e45e4..22f504418d 100644 --- a/tests/acceptance/empty_cpu_model.py +++ b/tests/avocado/empty_cpu_model.py @@ -7,9 +7,9 @@ # # This work is licensed under the terms of the GNU GPL, version 2 or # later. See the COPYING file in the top-level directory. 
-from avocado_qemu import Test +from avocado_qemu import QemuSystemTest -class EmptyCPUModel(Test): +class EmptyCPUModel(QemuSystemTest): def test(self): self.vm.add_args('-S', '-display', 'none', '-machine', 'none', '-cpu', '') self.vm.set_qmp_monitor(enabled=False) diff --git a/tests/acceptance/hotplug_cpu.py b/tests/avocado/hotplug_cpu.py similarity index 100% rename from tests/acceptance/hotplug_cpu.py rename to tests/avocado/hotplug_cpu.py diff --git a/tests/acceptance/info_usernet.py b/tests/avocado/info_usernet.py similarity index 92% rename from tests/acceptance/info_usernet.py rename to tests/avocado/info_usernet.py index 9c1fd903a0..dc01f74150 100644 --- a/tests/acceptance/info_usernet.py +++ b/tests/avocado/info_usernet.py @@ -8,12 +8,12 @@ # This work is licensed under the terms of the GNU GPL, version 2 or # later. See the COPYING file in the top-level directory. -from avocado_qemu import Test +from avocado_qemu import QemuSystemTest from qemu.utils import get_info_usernet_hostfwd_port -class InfoUsernet(Test): +class InfoUsernet(QemuSystemTest): def test_hostfwd(self): self.vm.add_args('-netdev', 'user,id=vnet,hostfwd=:127.0.0.1:0-:22') diff --git a/tests/acceptance/intel_iommu.py b/tests/avocado/intel_iommu.py similarity index 100% rename from tests/acceptance/intel_iommu.py rename to tests/avocado/intel_iommu.py diff --git a/tests/acceptance/linux_initrd.py b/tests/avocado/linux_initrd.py similarity index 96% rename from tests/acceptance/linux_initrd.py rename to tests/avocado/linux_initrd.py index a249e2f14a..ba02e5a563 100644 --- a/tests/acceptance/linux_initrd.py +++ b/tests/avocado/linux_initrd.py @@ -1,4 +1,4 @@ -# Linux initrd acceptance test. +# Linux initrd integration test. # # Copyright (c) 2018 Red Hat, Inc. # @@ -12,11 +12,11 @@ import os import logging import tempfile -from avocado_qemu import Test +from avocado_qemu import QemuSystemTest from avocado import skipIf -class LinuxInitrd(Test): +class LinuxInitrd(QemuSystemTest): """ Checks QEMU evaluates correctly the initrd file passed as -initrd option. 
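The rename that runs through these hunks, avocado_qemu.Test becoming QemuSystemTest, does not change how a system-mode test is written: the test still drives the guest through self.vm and the console helpers imported from avocado_qemu. Below is a minimal sketch of a test against the renamed base class, assuming only the imports and helpers shown in the hunks above; the x86_64 tag, the placeholder kernel URL and the console pattern are illustrative and not taken from any file in this patch.

    from avocado_qemu import QemuSystemTest
    from avocado_qemu import wait_for_console_pattern

    class ExampleBoot(QemuSystemTest):
        """
        :avocado: tags=arch:x86_64
        :avocado: tags=quick
        """
        timeout = 60

        def test_console_banner(self):
            # Illustrative placeholder asset; a real test would point at a
            # published kernel image and pin its hash.
            kernel_path = self.fetch_asset('https://example.org/vmlinuz',
                                           asset_hash=None)
            self.vm.set_console()
            self.vm.add_args('-kernel', kernel_path,
                             '-append', 'printk.time=0 console=ttyS0')
            self.vm.launch()
            # Block until the expected marker shows up on the serial console.
            wait_for_console_pattern(self, 'Booting Linux')

QemuUserTest, added in the same avocado_qemu module, plays the equivalent role for linux-user binaries; load_bflt.py further down in this patch shows it in use.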
diff --git a/tests/acceptance/linux_ssh_mips_malta.py b/tests/avocado/linux_ssh_mips_malta.py similarity index 98% rename from tests/acceptance/linux_ssh_mips_malta.py rename to tests/avocado/linux_ssh_mips_malta.py index 4de1947418..c0f0be5ade 100644 --- a/tests/acceptance/linux_ssh_mips_malta.py +++ b/tests/avocado/linux_ssh_mips_malta.py @@ -12,7 +12,8 @@ import logging import time from avocado import skipUnless -from avocado_qemu import Test, LinuxSSHMixIn +from avocado_qemu import LinuxSSHMixIn +from avocado_qemu import QemuSystemTest from avocado_qemu import wait_for_console_pattern from avocado.utils import process from avocado.utils import archive @@ -21,7 +22,7 @@ from avocado.utils import ssh @skipUnless(os.getenv('AVOCADO_TIMEOUT_EXPECTED'), 'Test might timeout') @skipUnless(ssh.SSH_CLIENT_BINARY, 'No SSH client available') -class LinuxSSH(Test, LinuxSSHMixIn): +class LinuxSSH(QemuSystemTest, LinuxSSHMixIn): timeout = 150 # Not for 'configure --enable-debug --enable-debug-tcg' diff --git a/tests/avocado/load_bflt.py b/tests/avocado/load_bflt.py new file mode 100644 index 0000000000..bb50cec1ee --- /dev/null +++ b/tests/avocado/load_bflt.py @@ -0,0 +1,54 @@ +# Test the bFLT loader format +# +# Copyright (C) 2019 Philippe Mathieu-Daudé +# +# SPDX-License-Identifier: GPL-2.0-or-later + +import os +import bz2 +import subprocess + +from avocado import skipUnless +from avocado_qemu import QemuUserTest +from avocado_qemu import has_cmd + + +class LoadBFLT(QemuUserTest): + + def extract_cpio(self, cpio_path): + """ + Extracts a cpio archive into the test workdir + + :param cpio_path: path to the cpio archive + """ + cwd = os.getcwd() + os.chdir(self.workdir) + with bz2.open(cpio_path, 'rb') as archive_cpio: + subprocess.run(['cpio', '-i'], input=archive_cpio.read(), + stderr=subprocess.DEVNULL) + os.chdir(cwd) + + @skipUnless(*has_cmd('cpio')) + @skipUnless(os.getenv('AVOCADO_ALLOW_UNTRUSTED_CODE'), 'untrusted code') + def test_stm32(self): + """ + :avocado: tags=arch:arm + :avocado: tags=linux_user + :avocado: tags=quick + """ + # See https://elinux.org/STM32#User_Space + rootfs_url = ('https://elinux.org/images/5/51/' + 'Stm32_mini_rootfs.cpio.bz2') + rootfs_hash = '9f065e6ba40cce7411ba757f924f30fcc57951e6' + rootfs_path_bz2 = self.fetch_asset(rootfs_url, asset_hash=rootfs_hash) + busybox_path = os.path.join(self.workdir, "/bin/busybox") + + self.extract_cpio(rootfs_path_bz2) + + res = self.run(busybox_path) + ver = 'BusyBox v1.24.0.git (2015-02-03 22:17:13 CET) multi-call binary.' + self.assertIn(ver, res.stdout_text) + + res = self.run(busybox_path, ['uname', '-a']) + unm = 'armv7l GNU/Linux' + self.assertIn(unm, res.stdout_text) diff --git a/tests/acceptance/machine_arm_canona1100.py b/tests/avocado/machine_arm_canona1100.py similarity index 93% rename from tests/acceptance/machine_arm_canona1100.py rename to tests/avocado/machine_arm_canona1100.py index 0e5c43dbcf..182a0b0513 100644 --- a/tests/acceptance/machine_arm_canona1100.py +++ b/tests/avocado/machine_arm_canona1100.py @@ -8,11 +8,11 @@ # This work is licensed under the terms of the GNU GPL, version 2 or # later. See the COPYING file in the top-level directory. 
-from avocado_qemu import Test +from avocado_qemu import QemuSystemTest from avocado_qemu import wait_for_console_pattern from avocado.utils import archive -class CanonA1100Machine(Test): +class CanonA1100Machine(QemuSystemTest): """Boots the barebox firmware and checks that the console is operational""" timeout = 90 diff --git a/tests/acceptance/machine_arm_integratorcp.py b/tests/avocado/machine_arm_integratorcp.py similarity index 97% rename from tests/acceptance/machine_arm_integratorcp.py rename to tests/avocado/machine_arm_integratorcp.py index 49c8ebff78..1ffe1073ef 100644 --- a/tests/acceptance/machine_arm_integratorcp.py +++ b/tests/avocado/machine_arm_integratorcp.py @@ -12,7 +12,7 @@ import os import logging from avocado import skipUnless -from avocado_qemu import Test +from avocado_qemu import QemuSystemTest from avocado_qemu import wait_for_console_pattern @@ -29,7 +29,7 @@ except ImportError: CV2_AVAILABLE = False -class IntegratorMachine(Test): +class IntegratorMachine(QemuSystemTest): timeout = 90 diff --git a/tests/acceptance/machine_arm_n8x0.py b/tests/avocado/machine_arm_n8x0.py similarity index 95% rename from tests/acceptance/machine_arm_n8x0.py rename to tests/avocado/machine_arm_n8x0.py index e5741f2d8d..12e9a6803b 100644 --- a/tests/acceptance/machine_arm_n8x0.py +++ b/tests/avocado/machine_arm_n8x0.py @@ -11,10 +11,10 @@ import os from avocado import skipUnless -from avocado_qemu import Test +from avocado_qemu import QemuSystemTest from avocado_qemu import wait_for_console_pattern -class N8x0Machine(Test): +class N8x0Machine(QemuSystemTest): """Boots the Linux kernel and checks that the console is operational""" timeout = 90 diff --git a/tests/acceptance/machine_avr6.py b/tests/avocado/machine_avr6.py similarity index 94% rename from tests/acceptance/machine_avr6.py rename to tests/avocado/machine_avr6.py index 6baf4e9c7f..5485db79c6 100644 --- a/tests/acceptance/machine_avr6.py +++ b/tests/avocado/machine_avr6.py @@ -1,5 +1,5 @@ # -# QEMU AVR acceptance tests +# QEMU AVR integration tests # # Copyright (c) 2019-2020 Michael Rolnik # @@ -19,9 +19,9 @@ import time -from avocado_qemu import Test +from avocado_qemu import QemuSystemTest -class AVR6Machine(Test): +class AVR6Machine(QemuSystemTest): timeout = 5 def test_freertos(self): diff --git a/tests/acceptance/machine_m68k_nextcube.py b/tests/avocado/machine_m68k_nextcube.py similarity index 97% rename from tests/acceptance/machine_m68k_nextcube.py rename to tests/avocado/machine_m68k_nextcube.py index 09e2745cc5..6790e7d9cd 100644 --- a/tests/acceptance/machine_m68k_nextcube.py +++ b/tests/avocado/machine_m68k_nextcube.py @@ -8,7 +8,7 @@ import os import time -from avocado_qemu import Test +from avocado_qemu import QemuSystemTest from avocado import skipUnless from tesseract_utils import tesseract_available, tesseract_ocr @@ -20,7 +20,7 @@ except ImportError: PIL_AVAILABLE = False -class NextCubeMachine(Test): +class NextCubeMachine(QemuSystemTest): """ :avocado: tags=arch:m68k :avocado: tags=machine:next-cube diff --git a/tests/acceptance/machine_microblaze.py b/tests/avocado/machine_microblaze.py similarity index 94% rename from tests/acceptance/machine_microblaze.py rename to tests/avocado/machine_microblaze.py index 7f6d18495d..4928920f96 100644 --- a/tests/acceptance/machine_microblaze.py +++ b/tests/avocado/machine_microblaze.py @@ -5,11 +5,11 @@ # This work is licensed under the terms of the GNU GPL, version 2 or # later. See the COPYING file in the top-level directory. 
-from avocado_qemu import Test +from avocado_qemu import QemuSystemTest from avocado_qemu import wait_for_console_pattern from avocado.utils import archive -class MicroblazeMachine(Test): +class MicroblazeMachine(QemuSystemTest): timeout = 90 diff --git a/tests/acceptance/machine_mips_fuloong2e.py b/tests/avocado/machine_mips_fuloong2e.py similarity index 95% rename from tests/acceptance/machine_mips_fuloong2e.py rename to tests/avocado/machine_mips_fuloong2e.py index 0ac285e2af..89291f47b2 100644 --- a/tests/acceptance/machine_mips_fuloong2e.py +++ b/tests/avocado/machine_mips_fuloong2e.py @@ -10,10 +10,10 @@ import os from avocado import skipUnless -from avocado_qemu import Test +from avocado_qemu import QemuSystemTest from avocado_qemu import wait_for_console_pattern -class MipsFuloong2e(Test): +class MipsFuloong2e(QemuSystemTest): timeout = 60 diff --git a/tests/acceptance/machine_mips_loongson3v.py b/tests/avocado/machine_mips_loongson3v.py similarity index 94% rename from tests/acceptance/machine_mips_loongson3v.py rename to tests/avocado/machine_mips_loongson3v.py index 85b131a40f..5194cf18c9 100644 --- a/tests/acceptance/machine_mips_loongson3v.py +++ b/tests/avocado/machine_mips_loongson3v.py @@ -11,10 +11,10 @@ import os import time from avocado import skipUnless -from avocado_qemu import Test +from avocado_qemu import QemuSystemTest from avocado_qemu import wait_for_console_pattern -class MipsLoongson3v(Test): +class MipsLoongson3v(QemuSystemTest): timeout = 60 @skipUnless(os.getenv('AVOCADO_ALLOW_UNTRUSTED_CODE'), 'untrusted code') diff --git a/tests/acceptance/machine_mips_malta.py b/tests/avocado/machine_mips_malta.py similarity index 98% rename from tests/acceptance/machine_mips_malta.py rename to tests/avocado/machine_mips_malta.py index b67d8cb141..f1895d59f3 100644 --- a/tests/acceptance/machine_mips_malta.py +++ b/tests/avocado/machine_mips_malta.py @@ -12,7 +12,7 @@ import gzip import logging from avocado import skipUnless -from avocado_qemu import Test +from avocado_qemu import QemuSystemTest from avocado_qemu import wait_for_console_pattern from avocado.utils import archive from avocado import skipIf @@ -33,7 +33,7 @@ except ImportError: @skipUnless(NUMPY_AVAILABLE, 'Python NumPy not installed') @skipUnless(CV2_AVAILABLE, 'Python OpenCV not installed') -class MaltaMachineFramebuffer(Test): +class MaltaMachineFramebuffer(QemuSystemTest): timeout = 30 diff --git a/tests/acceptance/machine_rx_gdbsim.py b/tests/avocado/machine_rx_gdbsim.py similarity index 97% rename from tests/acceptance/machine_rx_gdbsim.py rename to tests/avocado/machine_rx_gdbsim.py index 32b737b6d8..6cd8704b01 100644 --- a/tests/acceptance/machine_rx_gdbsim.py +++ b/tests/avocado/machine_rx_gdbsim.py @@ -11,13 +11,13 @@ import os from avocado import skipIf -from avocado_qemu import Test +from avocado_qemu import QemuSystemTest from avocado_qemu import exec_command_and_wait_for_pattern from avocado_qemu import wait_for_console_pattern from avocado.utils import archive -class RxGdbSimMachine(Test): +class RxGdbSimMachine(QemuSystemTest): timeout = 30 KERNEL_COMMON_COMMAND_LINE = 'printk.time=0 ' diff --git a/tests/acceptance/machine_s390_ccw_virtio.py b/tests/avocado/machine_s390_ccw_virtio.py similarity index 99% rename from tests/acceptance/machine_s390_ccw_virtio.py rename to tests/avocado/machine_s390_ccw_virtio.py index 4028c99afc..bd03d7160b 100644 --- a/tests/acceptance/machine_s390_ccw_virtio.py +++ b/tests/avocado/machine_s390_ccw_virtio.py @@ -13,12 +13,12 @@ import os import tempfile from 
avocado import skipIf -from avocado_qemu import Test +from avocado_qemu import QemuSystemTest from avocado_qemu import exec_command_and_wait_for_pattern from avocado_qemu import wait_for_console_pattern from avocado.utils import archive -class S390CCWVirtioMachine(Test): +class S390CCWVirtioMachine(QemuSystemTest): KERNEL_COMMON_COMMAND_LINE = 'printk.time=0 ' timeout = 120 diff --git a/tests/acceptance/machine_sparc64_sun4u.py b/tests/avocado/machine_sparc64_sun4u.py similarity index 100% rename from tests/acceptance/machine_sparc64_sun4u.py rename to tests/avocado/machine_sparc64_sun4u.py diff --git a/tests/acceptance/machine_sparc_leon3.py b/tests/avocado/machine_sparc_leon3.py similarity index 94% rename from tests/acceptance/machine_sparc_leon3.py rename to tests/avocado/machine_sparc_leon3.py index 2405cd7a0d..e61b223185 100644 --- a/tests/acceptance/machine_sparc_leon3.py +++ b/tests/avocado/machine_sparc_leon3.py @@ -5,12 +5,12 @@ # This work is licensed under the terms of the GNU GPL, version 2 or # later. See the COPYING file in the top-level directory. -from avocado_qemu import Test +from avocado_qemu import QemuSystemTest from avocado_qemu import wait_for_console_pattern from avocado import skip -class Leon3Machine(Test): +class Leon3Machine(QemuSystemTest): timeout = 60 diff --git a/tests/acceptance/migration.py b/tests/avocado/migration.py similarity index 97% rename from tests/acceptance/migration.py rename to tests/avocado/migration.py index 792639cb69..584d6ef53f 100644 --- a/tests/acceptance/migration.py +++ b/tests/avocado/migration.py @@ -11,7 +11,7 @@ import tempfile -from avocado_qemu import Test +from avocado_qemu import QemuSystemTest from avocado import skipUnless from avocado.utils import network @@ -19,7 +19,7 @@ from avocado.utils import wait from avocado.utils.path import find_command -class Migration(Test): +class Migration(QemuSystemTest): """ :avocado: tags=migration """ diff --git a/tests/acceptance/multiprocess.py b/tests/avocado/multiprocess.py similarity index 98% rename from tests/acceptance/multiprocess.py rename to tests/avocado/multiprocess.py index 96627f022a..80a3b8f442 100644 --- a/tests/acceptance/multiprocess.py +++ b/tests/avocado/multiprocess.py @@ -7,12 +7,12 @@ import os import socket -from avocado_qemu import Test +from avocado_qemu import QemuSystemTest from avocado_qemu import wait_for_console_pattern from avocado_qemu import exec_command from avocado_qemu import exec_command_and_wait_for_pattern -class Multiprocess(Test): +class Multiprocess(QemuSystemTest): """ :avocado: tags=multiprocess """ diff --git a/tests/acceptance/pc_cpu_hotplug_props.py b/tests/avocado/pc_cpu_hotplug_props.py similarity index 93% rename from tests/acceptance/pc_cpu_hotplug_props.py rename to tests/avocado/pc_cpu_hotplug_props.py index 2e86d5017a..52b878188e 100644 --- a/tests/acceptance/pc_cpu_hotplug_props.py +++ b/tests/avocado/pc_cpu_hotplug_props.py @@ -20,9 +20,9 @@ # License along with this library; if not, see . # -from avocado_qemu import Test +from avocado_qemu import QemuSystemTest -class OmittedCPUProps(Test): +class OmittedCPUProps(QemuSystemTest): """ :avocado: tags=arch:x86_64 :avocado: tags=cpu:qemu64 diff --git a/tests/avocado/ppc_405.py b/tests/avocado/ppc_405.py new file mode 100644 index 0000000000..a47f89b934 --- /dev/null +++ b/tests/avocado/ppc_405.py @@ -0,0 +1,42 @@ +# Test that the U-Boot firmware boots on ppc 405 machines and check the console +# +# Copyright (c) 2021 Red Hat, Inc. 
+# +# This work is licensed under the terms of the GNU GPL, version 2 or +# later. See the COPYING file in the top-level directory. + +from avocado.utils import archive +from avocado_qemu import QemuSystemTest +from avocado_qemu import wait_for_console_pattern +from avocado_qemu import exec_command_and_wait_for_pattern + +class Ppc405Machine(QemuSystemTest): + + timeout = 90 + + def do_test_ppc405(self): + uboot_url = ('https://gitlab.com/huth/u-boot/-/raw/' + 'taihu-2021-10-09/u-boot-taihu.bin') + uboot_hash = ('3208940e908a5edc7c03eab072c60f0dcfadc2ab'); + file_path = self.fetch_asset(uboot_url, asset_hash=uboot_hash) + self.vm.set_console(console_index=1) + self.vm.add_args('-bios', file_path) + self.vm.launch() + wait_for_console_pattern(self, 'AMCC PPC405EP Evaluation Board') + exec_command_and_wait_for_pattern(self, 'reset', 'AMCC PowerPC 405EP') + + def test_ppc_taihu(self): + """ + :avocado: tags=arch:ppc + :avocado: tags=machine:taihu + :avocado: tags=cpu:405ep + """ + self.do_test_ppc405() + + def test_ppc_ref405ep(self): + """ + :avocado: tags=arch:ppc + :avocado: tags=machine:ref405ep + :avocado: tags=cpu:405ep + """ + self.do_test_ppc405() diff --git a/tests/avocado/ppc_bamboo.py b/tests/avocado/ppc_bamboo.py new file mode 100644 index 0000000000..40629e3478 --- /dev/null +++ b/tests/avocado/ppc_bamboo.py @@ -0,0 +1,39 @@ +# Test that Linux kernel boots on the ppc bamboo board and check the console +# +# Copyright (c) 2021 Red Hat +# +# This work is licensed under the terms of the GNU GPL, version 2 or +# later. See the COPYING file in the top-level directory. + +from avocado.utils import archive +from avocado_qemu import QemuSystemTest +from avocado_qemu import wait_for_console_pattern +from avocado_qemu import exec_command_and_wait_for_pattern + +class BambooMachine(QemuSystemTest): + + timeout = 90 + + def test_ppc_bamboo(self): + """ + :avocado: tags=arch:ppc + :avocado: tags=machine:bamboo + :avocado: tags=cpu:440epb + :avocado: tags=device:rtl8139 + """ + tar_url = ('http://landley.net/aboriginal/downloads/binaries/' + 'system-image-powerpc-440fp.tar.gz') + tar_hash = '53e5f16414b195b82d2c70272f81c2eedb39bad9' + file_path = self.fetch_asset(tar_url, asset_hash=tar_hash) + archive.extract(file_path, self.workdir) + self.vm.set_console() + self.vm.add_args('-kernel', self.workdir + + '/system-image-powerpc-440fp/linux', + '-initrd', self.workdir + + '/system-image-powerpc-440fp/rootfs.cpio.gz', + '-nic', 'user,model=rtl8139,restrict=on') + self.vm.launch() + wait_for_console_pattern(self, 'Type exit when done') + exec_command_and_wait_for_pattern(self, 'ping 10.0.2.2', + '10.0.2.2 is alive!') + exec_command_and_wait_for_pattern(self, 'halt', 'System Halted') diff --git a/tests/acceptance/ppc_mpc8544ds.py b/tests/avocado/ppc_mpc8544ds.py similarity index 93% rename from tests/acceptance/ppc_mpc8544ds.py rename to tests/avocado/ppc_mpc8544ds.py index ce840600c1..886f967b15 100644 --- a/tests/acceptance/ppc_mpc8544ds.py +++ b/tests/avocado/ppc_mpc8544ds.py @@ -6,10 +6,10 @@ # later. See the COPYING file in the top-level directory. 
from avocado.utils import archive -from avocado_qemu import Test +from avocado_qemu import QemuSystemTest from avocado_qemu import wait_for_console_pattern -class Mpc8544dsMachine(Test): +class Mpc8544dsMachine(QemuSystemTest): timeout = 90 KERNEL_COMMON_COMMAND_LINE = 'printk.time=0 ' diff --git a/tests/acceptance/ppc_prep_40p.py b/tests/avocado/ppc_prep_40p.py similarity index 97% rename from tests/acceptance/ppc_prep_40p.py rename to tests/avocado/ppc_prep_40p.py index 5e61e686bd..4bd956584d 100644 --- a/tests/acceptance/ppc_prep_40p.py +++ b/tests/avocado/ppc_prep_40p.py @@ -8,11 +8,11 @@ import os from avocado import skipUnless -from avocado_qemu import Test +from avocado_qemu import QemuSystemTest from avocado_qemu import wait_for_console_pattern -class IbmPrep40pMachine(Test): +class IbmPrep40pMachine(QemuSystemTest): timeout = 60 diff --git a/tests/acceptance/ppc_pseries.py b/tests/avocado/ppc_pseries.py similarity index 94% rename from tests/acceptance/ppc_pseries.py rename to tests/avocado/ppc_pseries.py index f14a884ee1..d8b04dc3ea 100644 --- a/tests/acceptance/ppc_pseries.py +++ b/tests/avocado/ppc_pseries.py @@ -6,10 +6,10 @@ # later. See the COPYING file in the top-level directory. from avocado.utils import archive -from avocado_qemu import Test +from avocado_qemu import QemuSystemTest from avocado_qemu import wait_for_console_pattern -class pseriesMachine(Test): +class pseriesMachine(QemuSystemTest): timeout = 90 KERNEL_COMMON_COMMAND_LINE = 'printk.time=0 ' diff --git a/tests/acceptance/ppc_virtex_ml507.py b/tests/avocado/ppc_virtex_ml507.py similarity index 93% rename from tests/acceptance/ppc_virtex_ml507.py rename to tests/avocado/ppc_virtex_ml507.py index 27f7bf2d49..a6912ee579 100644 --- a/tests/acceptance/ppc_virtex_ml507.py +++ b/tests/avocado/ppc_virtex_ml507.py @@ -6,10 +6,10 @@ # later. See the COPYING file in the top-level directory. 
from avocado.utils import archive -from avocado_qemu import Test +from avocado_qemu import QemuSystemTest from avocado_qemu import wait_for_console_pattern -class VirtexMl507Machine(Test): +class VirtexMl507Machine(QemuSystemTest): timeout = 90 KERNEL_COMMON_COMMAND_LINE = 'printk.time=0 ' diff --git a/tests/acceptance/replay_kernel.py b/tests/avocado/replay_kernel.py similarity index 100% rename from tests/acceptance/replay_kernel.py rename to tests/avocado/replay_kernel.py diff --git a/tests/acceptance/replay_linux.py b/tests/avocado/replay_linux.py similarity index 100% rename from tests/acceptance/replay_linux.py rename to tests/avocado/replay_linux.py diff --git a/tests/acceptance/reverse_debugging.py b/tests/avocado/reverse_debugging.py similarity index 100% rename from tests/acceptance/reverse_debugging.py rename to tests/avocado/reverse_debugging.py diff --git a/tests/acceptance/smmu.py b/tests/avocado/smmu.py similarity index 100% rename from tests/acceptance/smmu.py rename to tests/avocado/smmu.py diff --git a/tests/acceptance/tcg_plugins.py b/tests/avocado/tcg_plugins.py similarity index 100% rename from tests/acceptance/tcg_plugins.py rename to tests/avocado/tcg_plugins.py diff --git a/tests/acceptance/tesseract_utils.py b/tests/avocado/tesseract_utils.py similarity index 100% rename from tests/acceptance/tesseract_utils.py rename to tests/avocado/tesseract_utils.py diff --git a/tests/acceptance/version.py b/tests/avocado/version.py similarity index 88% rename from tests/acceptance/version.py rename to tests/avocado/version.py index 79b923d4fc..ded7f039c1 100644 --- a/tests/acceptance/version.py +++ b/tests/avocado/version.py @@ -9,10 +9,10 @@ # later. See the COPYING file in the top-level directory. -from avocado_qemu import Test +from avocado_qemu import QemuSystemTest -class Version(Test): +class Version(QemuSystemTest): """ :avocado: tags=quick """ diff --git a/tests/acceptance/virtio-gpu.py b/tests/avocado/virtio-gpu.py similarity index 98% rename from tests/acceptance/virtio-gpu.py rename to tests/avocado/virtio-gpu.py index 4acc1e6d5f..2a249a3a2c 100644 --- a/tests/acceptance/virtio-gpu.py +++ b/tests/avocado/virtio-gpu.py @@ -4,8 +4,8 @@ # later. See the COPYING file in the top-level directory. 
-from avocado_qemu import Test from avocado_qemu import BUILD_DIR +from avocado_qemu import QemuSystemTest from avocado_qemu import wait_for_console_pattern from avocado_qemu import exec_command_and_wait_for_pattern from avocado_qemu import is_readable_executable_file @@ -27,7 +27,7 @@ def pick_default_vug_bin(): return bld_dir_path -class VirtioGPUx86(Test): +class VirtioGPUx86(QemuSystemTest): """ :avocado: tags=virtio-gpu :avocado: tags=arch:x86_64 diff --git a/tests/acceptance/virtio_check_params.py b/tests/avocado/virtio_check_params.py similarity index 98% rename from tests/acceptance/virtio_check_params.py rename to tests/avocado/virtio_check_params.py index 87e6c839d1..e869690473 100644 --- a/tests/acceptance/virtio_check_params.py +++ b/tests/avocado/virtio_check_params.py @@ -24,7 +24,7 @@ import logging sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..', 'python')) from qemu.machine import QEMUMachine -from avocado_qemu import Test +from avocado_qemu import QemuSystemTest from avocado import skip #list of machine types and virtqueue properties to test @@ -41,7 +41,7 @@ VM_DEV_PARAMS = {'virtio-scsi-pci': ['-device', 'virtio-scsi-pci,id=scsi0'], 'driver=null-co,id=drive0,if=none']} -class VirtioMaxSegSettingsCheck(Test): +class VirtioMaxSegSettingsCheck(QemuSystemTest): @staticmethod def make_pattern(props): pattern_items = ['{0} = \w+'.format(prop) for prop in props] diff --git a/tests/acceptance/virtio_version.py b/tests/avocado/virtio_version.py similarity index 98% rename from tests/acceptance/virtio_version.py rename to tests/avocado/virtio_version.py index 33593c29dd..208910bb84 100644 --- a/tests/acceptance/virtio_version.py +++ b/tests/avocado/virtio_version.py @@ -13,7 +13,7 @@ import os sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..', 'python')) from qemu.machine import QEMUMachine -from avocado_qemu import Test +from avocado_qemu import QemuSystemTest # Virtio Device IDs: VIRTIO_NET = 1 @@ -55,7 +55,7 @@ def get_pci_interfaces(vm, devtype): interfaces = ('pci-express-device', 'conventional-pci-device') return [i for i in interfaces if devtype_implements(vm, devtype, i)] -class VirtioVersionCheck(Test): +class VirtioVersionCheck(QemuSystemTest): """ Check if virtio-version-specific device types result in the same device tree created by `disable-modern` and diff --git a/tests/acceptance/virtiofs_submounts.py b/tests/avocado/virtiofs_submounts.py similarity index 82% rename from tests/acceptance/virtiofs_submounts.py rename to tests/avocado/virtiofs_submounts.py index 21ad7d792e..e6dc32ffd4 100644 --- a/tests/acceptance/virtiofs_submounts.py +++ b/tests/avocado/virtiofs_submounts.py @@ -6,67 +6,12 @@ import time from avocado import skipUnless from avocado_qemu import LinuxTest, BUILD_DIR +from avocado_qemu import has_cmds +from avocado_qemu import run_cmd from avocado_qemu import wait_for_console_pattern from avocado.utils import ssh -def run_cmd(args): - subp = subprocess.Popen(args, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - universal_newlines=True) - stdout, stderr = subp.communicate() - ret = subp.returncode - - return (stdout, stderr, ret) - -def has_cmd(name, args=None): - """ - This function is for use in a @avocado.skipUnless decorator, e.g.: - - @skipUnless(*has_cmd('sudo -n', ('sudo', '-n', 'true'))) - def test_something_that_needs_sudo(self): - ... 
- """ - - if args is None: - args = ('which', name) - - try: - _, stderr, exitcode = run_cmd(args) - except Exception as e: - exitcode = -1 - stderr = str(e) - - if exitcode != 0: - cmd_line = ' '.join(args) - err = f'{name} required, but "{cmd_line}" failed: {stderr.strip()}' - return (False, err) - else: - return (True, '') - -def has_cmds(*cmds): - """ - This function is for use in a @avocado.skipUnless decorator and - allows checking for the availability of multiple commands, e.g.: - - @skipUnless(*has_cmds(('cmd1', ('cmd1', '--some-parameter')), - 'cmd2', 'cmd3')) - def test_something_that_needs_cmd1_and_cmd2(self): - ... - """ - - for cmd in cmds: - if isinstance(cmd, str): - cmd = (cmd,) - - ok, errstr = has_cmd(*cmd) - if not ok: - return (False, errstr) - - return (True, '') - - class VirtiofsSubmountsTest(LinuxTest): """ :avocado: tags=arch:x86_64 diff --git a/tests/acceptance/virtiofs_submounts.py.data/cleanup.sh b/tests/avocado/virtiofs_submounts.py.data/cleanup.sh similarity index 100% rename from tests/acceptance/virtiofs_submounts.py.data/cleanup.sh rename to tests/avocado/virtiofs_submounts.py.data/cleanup.sh diff --git a/tests/acceptance/virtiofs_submounts.py.data/guest-cleanup.sh b/tests/avocado/virtiofs_submounts.py.data/guest-cleanup.sh similarity index 100% rename from tests/acceptance/virtiofs_submounts.py.data/guest-cleanup.sh rename to tests/avocado/virtiofs_submounts.py.data/guest-cleanup.sh diff --git a/tests/acceptance/virtiofs_submounts.py.data/guest.sh b/tests/avocado/virtiofs_submounts.py.data/guest.sh similarity index 100% rename from tests/acceptance/virtiofs_submounts.py.data/guest.sh rename to tests/avocado/virtiofs_submounts.py.data/guest.sh diff --git a/tests/acceptance/virtiofs_submounts.py.data/host.sh b/tests/avocado/virtiofs_submounts.py.data/host.sh similarity index 100% rename from tests/acceptance/virtiofs_submounts.py.data/host.sh rename to tests/avocado/virtiofs_submounts.py.data/host.sh diff --git a/tests/acceptance/vnc.py b/tests/avocado/vnc.py similarity index 96% rename from tests/acceptance/vnc.py rename to tests/avocado/vnc.py index f301fbb4f5..096432988f 100644 --- a/tests/acceptance/vnc.py +++ b/tests/avocado/vnc.py @@ -8,10 +8,10 @@ # This work is licensed under the terms of the GNU GPL, version 2 or # later. See the COPYING file in the top-level directory. 
-from avocado_qemu import Test +from avocado_qemu import QemuSystemTest -class Vnc(Test): +class Vnc(QemuSystemTest): """ :avocado: tags=vnc,quick """ diff --git a/tests/acceptance/x86_cpu_model_versions.py b/tests/avocado/x86_cpu_model_versions.py similarity index 99% rename from tests/acceptance/x86_cpu_model_versions.py rename to tests/avocado/x86_cpu_model_versions.py index 0e9feda62d..a6edf74c1c 100644 --- a/tests/acceptance/x86_cpu_model_versions.py +++ b/tests/avocado/x86_cpu_model_versions.py @@ -24,7 +24,7 @@ import avocado_qemu import re -class X86CPUModelAliases(avocado_qemu.Test): +class X86CPUModelAliases(avocado_qemu.QemuSystemTest): """ Validation of PC CPU model versions and CPU model aliases @@ -239,7 +239,7 @@ class X86CPUModelAliases(avocado_qemu.Test): self.validate_aliases(cpus) -class CascadelakeArchCapabilities(avocado_qemu.Test): +class CascadelakeArchCapabilities(avocado_qemu.QemuSystemTest): """ Validation of Cascadelake arch-capabilities diff --git a/tests/data/acpi/q35/APIC.xapic b/tests/data/acpi/q35/APIC.xapic new file mode 100644 index 0000000000..c1969c35aa Binary files /dev/null and b/tests/data/acpi/q35/APIC.xapic differ diff --git a/tests/data/acpi/q35/DMAR.dmar b/tests/data/acpi/q35/DMAR.dmar new file mode 100644 index 0000000000..0dca6e68ad Binary files /dev/null and b/tests/data/acpi/q35/DMAR.dmar differ diff --git a/tests/data/acpi/q35/DSDT b/tests/data/acpi/q35/DSDT index 842533f53e..281fc82c03 100644 Binary files a/tests/data/acpi/q35/DSDT and b/tests/data/acpi/q35/DSDT differ diff --git a/tests/data/acpi/q35/DSDT.acpihmat b/tests/data/acpi/q35/DSDT.acpihmat index 8d00f2ea0d..8c1e05a11a 100644 Binary files a/tests/data/acpi/q35/DSDT.acpihmat and b/tests/data/acpi/q35/DSDT.acpihmat differ diff --git a/tests/data/acpi/q35/DSDT.bridge b/tests/data/acpi/q35/DSDT.bridge index 55ad4bd7ab..6f1464b6c7 100644 Binary files a/tests/data/acpi/q35/DSDT.bridge and b/tests/data/acpi/q35/DSDT.bridge differ diff --git a/tests/data/acpi/q35/DSDT.cphp b/tests/data/acpi/q35/DSDT.cphp index ccde2add9f..f8337ff519 100644 Binary files a/tests/data/acpi/q35/DSDT.cphp and b/tests/data/acpi/q35/DSDT.cphp differ diff --git a/tests/data/acpi/q35/DSDT.dimmpxm b/tests/data/acpi/q35/DSDT.dimmpxm index b062e30117..fe5820d93d 100644 Binary files a/tests/data/acpi/q35/DSDT.dimmpxm and b/tests/data/acpi/q35/DSDT.dimmpxm differ diff --git a/tests/data/acpi/q35/DSDT.ipmibt b/tests/data/acpi/q35/DSDT.ipmibt index 1c5737692f..6317410658 100644 Binary files a/tests/data/acpi/q35/DSDT.ipmibt and b/tests/data/acpi/q35/DSDT.ipmibt differ diff --git a/tests/data/acpi/q35/DSDT.ivrs b/tests/data/acpi/q35/DSDT.ivrs new file mode 100644 index 0000000000..b0eafe90e5 Binary files /dev/null and b/tests/data/acpi/q35/DSDT.ivrs differ diff --git a/tests/data/acpi/q35/DSDT.memhp b/tests/data/acpi/q35/DSDT.memhp index 7b6f6487b2..9bc11518fc 100644 Binary files a/tests/data/acpi/q35/DSDT.memhp and b/tests/data/acpi/q35/DSDT.memhp differ diff --git a/tests/data/acpi/q35/DSDT.mmio64 b/tests/data/acpi/q35/DSDT.mmio64 index 2e0a772a85..713288a12e 100644 Binary files a/tests/data/acpi/q35/DSDT.mmio64 and b/tests/data/acpi/q35/DSDT.mmio64 differ diff --git a/tests/data/acpi/q35/DSDT.multi-bridge b/tests/data/acpi/q35/DSDT.multi-bridge new file mode 100644 index 0000000000..a24c713d22 Binary files /dev/null and b/tests/data/acpi/q35/DSDT.multi-bridge differ diff --git a/tests/data/acpi/q35/DSDT.nohpet b/tests/data/acpi/q35/DSDT.nohpet index ceb61f4115..e8202e6ddf 100644 Binary files 
a/tests/data/acpi/q35/DSDT.nohpet and b/tests/data/acpi/q35/DSDT.nohpet differ diff --git a/tests/data/acpi/q35/DSDT.numamem b/tests/data/acpi/q35/DSDT.numamem index a3f846df54..151e7cf429 100644 Binary files a/tests/data/acpi/q35/DSDT.numamem and b/tests/data/acpi/q35/DSDT.numamem differ diff --git a/tests/data/acpi/q35/DSDT.tis.tpm12 b/tests/data/acpi/q35/DSDT.tis.tpm12 index 6735e73971..c96b5277a1 100644 Binary files a/tests/data/acpi/q35/DSDT.tis.tpm12 and b/tests/data/acpi/q35/DSDT.tis.tpm12 differ diff --git a/tests/data/acpi/q35/DSDT.tis.tpm2 b/tests/data/acpi/q35/DSDT.tis.tpm2 index d1433e3c14..c92d4d29c7 100644 Binary files a/tests/data/acpi/q35/DSDT.tis.tpm2 and b/tests/data/acpi/q35/DSDT.tis.tpm2 differ diff --git a/tests/data/acpi/q35/DSDT.xapic b/tests/data/acpi/q35/DSDT.xapic new file mode 100644 index 0000000000..119fc90f1f Binary files /dev/null and b/tests/data/acpi/q35/DSDT.xapic differ diff --git a/tests/data/acpi/q35/FACP.xapic b/tests/data/acpi/q35/FACP.xapic new file mode 100644 index 0000000000..2d3659c9c6 Binary files /dev/null and b/tests/data/acpi/q35/FACP.xapic differ diff --git a/tests/data/acpi/q35/IVRS.ivrs b/tests/data/acpi/q35/IVRS.ivrs new file mode 100644 index 0000000000..17611202e5 Binary files /dev/null and b/tests/data/acpi/q35/IVRS.ivrs differ diff --git a/tests/data/acpi/q35/SRAT.xapic b/tests/data/acpi/q35/SRAT.xapic new file mode 100644 index 0000000000..1a91cfa65f Binary files /dev/null and b/tests/data/acpi/q35/SRAT.xapic differ diff --git a/tests/data/acpi/rebuild-expected-aml.sh b/tests/data/acpi/rebuild-expected-aml.sh index fc78770544..dcf2e2f221 100755 --- a/tests/data/acpi/rebuild-expected-aml.sh +++ b/tests/data/acpi/rebuild-expected-aml.sh @@ -12,7 +12,7 @@ # This work is licensed under the terms of the GNU GPLv2. # See the COPYING.LIB file in the top-level directory. -qemu_bins="./qemu-system-x86_64 ./qemu-system-aarch64" +qemu_arches="x86_64 aarch64" if [ ! -e "tests/qtest/bios-tables-test" ]; then echo "Test: bios-tables-test is required! Run make check before this script." @@ -20,6 +20,26 @@ if [ ! -e "tests/qtest/bios-tables-test" ]; then exit 1; fi +if grep TARGET_DIRS= config-host.mak; then + for arch in $qemu_arches; do + if grep TARGET_DIRS= config-host.mak | grep "$arch"-softmmu; + then + qemu_bins="$qemu_bins ./qemu-system-$arch" + fi + done +else + echo "config-host.mak missing!" + echo "Run this script from the build directory." + exit 1; +fi + +if [ -z "$qemu_bins" ]; then + echo "Only the following architectures are currently supported: $qemu_arches" + echo "None of these configured!" + echo "To fix, run configure --target-list=x86_64-softmmu,aarch64-softmmu" + exit 1; +fi + for qemu in $qemu_bins; do if [ ! 
-e $qemu ]; then echo "Run 'make' to build the following QEMU executables: $qemu_bins" diff --git a/tests/data/acpi/virt/DBG2 b/tests/data/acpi/virt/DBG2 new file mode 100644 index 0000000000..86e6314f7b Binary files /dev/null and b/tests/data/acpi/virt/DBG2 differ diff --git a/tests/data/acpi/virt/IORT b/tests/data/acpi/virt/IORT index 521acefe9b..7efd0ce8a6 100644 Binary files a/tests/data/acpi/virt/IORT and b/tests/data/acpi/virt/IORT differ diff --git a/tests/data/acpi/virt/IORT.memhp b/tests/data/acpi/virt/IORT.memhp index 521acefe9b..7efd0ce8a6 100644 Binary files a/tests/data/acpi/virt/IORT.memhp and b/tests/data/acpi/virt/IORT.memhp differ diff --git a/tests/data/acpi/virt/IORT.numamem b/tests/data/acpi/virt/IORT.numamem index 521acefe9b..7efd0ce8a6 100644 Binary files a/tests/data/acpi/virt/IORT.numamem and b/tests/data/acpi/virt/IORT.numamem differ diff --git a/tests/data/acpi/virt/IORT.pxb b/tests/data/acpi/virt/IORT.pxb index 521acefe9b..7efd0ce8a6 100644 Binary files a/tests/data/acpi/virt/IORT.pxb and b/tests/data/acpi/virt/IORT.pxb differ diff --git a/tests/data/acpi/virt/PPTT b/tests/data/acpi/virt/PPTT new file mode 100644 index 0000000000..7a1258ecf1 Binary files /dev/null and b/tests/data/acpi/virt/PPTT differ diff --git a/tests/docker/Makefile.include b/tests/docker/Makefile.include index ff5d732889..7a63a3b7f7 100644 --- a/tests/docker/Makefile.include +++ b/tests/docker/Makefile.include @@ -11,8 +11,10 @@ HOST_ARCH = $(if $(ARCH),$(ARCH),$(shell uname -m)) DOCKER_SUFFIX := .docker DOCKER_FILES_DIR := $(SRC_PATH)/tests/docker/dockerfiles # we don't run tests on intermediate images (used as base by another image) -DOCKER_PARTIAL_IMAGES := debian10 debian11 debian-bootstrap empty -DOCKER_IMAGES := $(sort $(notdir $(basename $(wildcard $(DOCKER_FILES_DIR)/*.docker)))) +DOCKER_PARTIAL_IMAGES := debian10 debian11 +# we don't directly build virtual images (they are used to build other images) +DOCKER_VIRTUAL_IMAGES := debian-bootstrap debian-toolchain empty +DOCKER_IMAGES := $(sort $(filter-out $(DOCKER_VIRTUAL_IMAGES), $(notdir $(basename $(wildcard $(DOCKER_FILES_DIR)/*.docker))))) DOCKER_TARGETS := $(patsubst %,docker-image-%,$(DOCKER_IMAGES)) # Use a global constant ccache directory to speed up repetitive builds DOCKER_CCACHE_DIR := $$HOME/.cache/qemu-docker-ccache @@ -141,11 +143,14 @@ docker-image-debian-mips64-cross: docker-image-debian10 docker-image-debian-mips64el-cross: docker-image-debian10 docker-image-debian-mipsel-cross: docker-image-debian10 docker-image-debian-ppc64el-cross: docker-image-debian10 -docker-image-debian-riscv64-cross: docker-image-debian10 docker-image-debian-s390x-cross: docker-image-debian10 docker-image-debian-sh4-cross: docker-image-debian10 docker-image-debian-sparc64-cross: docker-image-debian10 +# The native build should never use the registry +docker-image-debian-native: DOCKER_REGISTRY= + + # # The build rule for hexagon-cross is special in so far for most of # the time we don't want to build it. 
While dockers caching does avoid @@ -168,10 +173,39 @@ docker-image-debian-hexagon-cross: $(DOCKER_FILES_DIR)/debian-hexagon-cross.dock qemu/debian-hexagon-cross --add-current-user, \ "PREPARE", "debian-hexagon-cross")) +debian-toolchain-run = \ + $(if $(NOCACHE), \ + $(call quiet-command, \ + $(DOCKER_SCRIPT) build -t qemu/$1 -f $< \ + $(if $V,,--quiet) --no-cache \ + --registry $(DOCKER_REGISTRY) --extra-files \ + $(DOCKER_FILES_DIR)/$1.d/build-toolchain.sh, \ + "BUILD", $1), \ + $(call quiet-command, \ + $(DOCKER_SCRIPT) fetch $(if $V,,--quiet) \ + qemu/$1 $(DOCKER_REGISTRY), \ + "FETCH", $1) \ + $(call quiet-command, \ + $(DOCKER_SCRIPT) update $(if $V,,--quiet) \ + qemu/$1 \ + $(if $(NOUSER),,--add-current-user) \ + "PREPARE", $1)) +debian-toolchain = $(call debian-toolchain-run,$(patsubst docker-image-%,%,$1)) + +docker-image-debian-microblaze-cross: $(DOCKER_FILES_DIR)/debian-toolchain.docker \ + $(DOCKER_FILES_DIR)/debian-microblaze-cross.d/build-toolchain.sh + $(call debian-toolchain, $@) + +docker-image-debian-nios2-cross: $(DOCKER_FILES_DIR)/debian-toolchain.docker \ + $(DOCKER_FILES_DIR)/debian-nios2-cross.d/build-toolchain.sh + $(call debian-toolchain, $@) + # Specialist build images, sometimes very limited tools docker-image-debian-tricore-cross: docker-image-debian10 docker-image-debian-all-test-cross: docker-image-debian10 docker-image-debian-arm64-test-cross: docker-image-debian11 +docker-image-debian-microblaze-cross: docker-image-debian10 +docker-image-debian-nios2-cross: docker-image-debian10 docker-image-debian-powerpc-test-cross: docker-image-debian11 # These images may be good enough for building tests but not for test builds @@ -180,11 +214,12 @@ DOCKER_PARTIAL_IMAGES += debian-arm64-test-cross DOCKER_PARTIAL_IMAGES += debian-powerpc-test-cross DOCKER_PARTIAL_IMAGES += debian-hppa-cross DOCKER_PARTIAL_IMAGES += debian-m68k-cross debian-mips64-cross -DOCKER_PARTIAL_IMAGES += debian-riscv64-cross +DOCKER_PARTIAL_IMAGES += debian-microblaze-cross +DOCKER_PARTIAL_IMAGES += debian-nios2-cross DOCKER_PARTIAL_IMAGES += debian-sh4-cross debian-sparc64-cross DOCKER_PARTIAL_IMAGES += debian-tricore-cross DOCKER_PARTIAL_IMAGES += debian-xtensa-cross -DOCKER_PARTIAL_IMAGES += fedora-i386-cross fedora-cris-cross +DOCKER_PARTIAL_IMAGES += fedora-cris-cross # Rules for building linux-user powered images # @@ -193,7 +228,7 @@ DOCKER_PARTIAL_IMAGES += fedora-i386-cross fedora-cris-cross # packages. # Expand all the pre-requistes for each docker image and test combination -$(foreach i,$(filter-out $(DOCKER_PARTIAL_IMAGES),$(DOCKER_IMAGES)), \ +$(foreach i,$(filter-out $(DOCKER_PARTIAL_IMAGES) $(DOCKER_VIRTUAL_IMAGES),$(DOCKER_IMAGES)), \ $(foreach t,$(DOCKER_TESTS), \ $(eval .PHONY: docker-$t@$i) \ $(eval docker-$t@$i: docker-image-$i docker-run-$t@$i) \ diff --git a/tests/docker/common.rc b/tests/docker/common.rc index c5cc33d366..e6f8cee0d6 100755 --- a/tests/docker/common.rc +++ b/tests/docker/common.rc @@ -12,8 +12,14 @@ # the top-level directory. # This might be set by ENV of a docker container... it is always -# overriden by TARGET_LIST if the user sets it. -DEF_TARGET_LIST=${DEF_TARGET_LIST:-"x86_64-softmmu,aarch64-softmmu"} +# overriden by TARGET_LIST if the user sets it. We special case +# "none" to allow for other options like --disable-tcg to restrict the +# builds we eventually do. 
+if test "$DEF_TARGET_LIST" = "none"; then +    DEF_TARGET_LIST="" +else +    DEF_TARGET_LIST=${DEF_TARGET_LIST:-"x86_64-softmmu,aarch64-softmmu"} +fi requires_binary() { diff --git a/tests/docker/dockerfiles/debian-microblaze-cross.d/build-toolchain.sh b/tests/docker/dockerfiles/debian-microblaze-cross.d/build-toolchain.sh new file mode 100755 index 0000000000..23ec0aa9a7 --- /dev/null +++ b/tests/docker/dockerfiles/debian-microblaze-cross.d/build-toolchain.sh @@ -0,0 +1,88 @@ +#!/bin/bash + +set -e + +TARGET=microblaze-linux-musl +LINUX_ARCH=microblaze + +J=$(expr $(nproc) / 2) +TOOLCHAIN_INSTALL=/usr/local +TOOLCHAIN_BIN=${TOOLCHAIN_INSTALL}/bin +CROSS_SYSROOT=${TOOLCHAIN_INSTALL}/$TARGET/sys-root + +export PATH=${TOOLCHAIN_BIN}:$PATH + +# +# Grab all of the source for the toolchain bootstrap. +# + +wget https://ftp.gnu.org/gnu/binutils/binutils-2.37.tar.xz +wget https://ftp.gnu.org/gnu/gcc/gcc-11.2.0/gcc-11.2.0.tar.xz +wget https://www.musl-libc.org/releases/musl-1.2.2.tar.gz +wget https://cdn.kernel.org/pub/linux/kernel/v5.x/linux-5.10.70.tar.xz + +tar axf binutils-2.37.tar.xz +tar axf gcc-11.2.0.tar.xz +tar axf musl-1.2.2.tar.gz +tar axf linux-5.10.70.tar.xz + +mv binutils-2.37 src-binu +mv gcc-11.2.0 src-gcc +mv musl-1.2.2 src-musl +mv linux-5.10.70 src-linux + +mkdir -p bld-hdr bld-binu bld-gcc bld-musl +mkdir -p ${CROSS_SYSROOT}/usr/include + +# +# Install kernel headers +# + +cd src-linux +make headers_install ARCH=${LINUX_ARCH} INSTALL_HDR_PATH=${CROSS_SYSROOT}/usr +cd .. + +# +# Build binutils +# + +cd bld-binu +../src-binu/configure --disable-werror \ +    --prefix=${TOOLCHAIN_INSTALL} --with-sysroot --target=${TARGET} +make -j${J} +make install +cd .. + +# +# Build gcc, just the compiler so far. +# + +cd bld-gcc +../src-gcc/configure --disable-werror --disable-shared \ +    --prefix=${TOOLCHAIN_INSTALL} --with-sysroot --target=${TARGET} \ +    --enable-languages=c --disable-libssp --disable-libsanitizer \ +    --disable-libatomic --disable-libgomp --disable-libquadmath +make -j${J} all-gcc +make install-gcc +cd .. + +# +# Build musl. +# We won't go through the extra step of building shared libraries +# because we don't actually use them in QEMU docker testing. +# + +cd bld-musl +../src-musl/configure --prefix=/usr --host=${TARGET} --disable-shared +make -j${j} +make install DESTDIR=${CROSS_SYSROOT} +cd .. + +# +# Go back and build the compiler runtime +# + +cd bld-gcc +make -j${j} +make install +cd .. diff --git a/tests/docker/dockerfiles/debian-native.docker b/tests/docker/dockerfiles/debian-native.docker new file mode 100644 index 0000000000..efd55cb6e0 --- /dev/null +++ b/tests/docker/dockerfiles/debian-native.docker @@ -0,0 +1,49 @@ +# +# Docker Debian Native +# +# This is intended to build QEMU on native host systems. Debian is +# chosen due to the broadest range of supported host systems for QEMU. +# +# This docker target is based on the docker.io Debian Bullseye base +# image rather than QEMU's base because we would otherwise confuse the +# build grabbing stuff from the registry built for other +# architectures.
+# +FROM docker.io/library/debian:bullseye-slim +MAINTAINER Alex Bennée + +# Duplicate deb line as deb-src +RUN cat /etc/apt/sources.list | sed "s/^deb\ /deb-src /" >> /etc/apt/sources.list + +# Install common build utilities +RUN apt update && \ + DEBIAN_FRONTEND=noninteractive apt install -yy eatmydata + +RUN apt update && \ + DEBIAN_FRONTEND=noninteractive eatmydata \ + apt build-dep -yy --arch-only qemu + +RUN apt update && \ + DEBIAN_FRONTEND=noninteractive eatmydata \ + apt install -y --no-install-recommends \ + cscope \ + genisoimage \ + exuberant-ctags \ + global \ + libbz2-dev \ + liblzo2-dev \ + libgcrypt20-dev \ + libfdt-dev \ + librdmacm-dev \ + libsasl2-dev \ + libsnappy-dev \ + libvte-dev \ + netcat-openbsd \ + ninja-build \ + openssh-client \ + python3-numpy \ + python3-opencv \ + python3-venv + +ENV QEMU_CONFIGURE_OPTS $QEMU_CONFIGURE_OPTS +ENV DEF_TARGET_LIST "none" diff --git a/tests/docker/dockerfiles/debian-nios2-cross.d/build-toolchain.sh b/tests/docker/dockerfiles/debian-nios2-cross.d/build-toolchain.sh new file mode 100755 index 0000000000..ba3c9d8aff --- /dev/null +++ b/tests/docker/dockerfiles/debian-nios2-cross.d/build-toolchain.sh @@ -0,0 +1,87 @@ +#!/bin/bash + +set -e + +TARGET=nios2-linux-gnu +LINUX_ARCH=nios2 + +J=$(expr $(nproc) / 2) +TOOLCHAIN_INSTALL=/usr/local +TOOLCHAIN_BIN=${TOOLCHAIN_INSTALL}/bin +CROSS_SYSROOT=${TOOLCHAIN_INSTALL}/$TARGET/sys-root + +export PATH=${TOOLCHAIN_BIN}:$PATH + +# +# Grab all of the source for the toolchain bootstrap. +# + +wget https://ftp.gnu.org/gnu/binutils/binutils-2.37.tar.xz +wget https://ftp.gnu.org/gnu/gcc/gcc-11.2.0/gcc-11.2.0.tar.xz +wget https://ftp.gnu.org/gnu/glibc/glibc-2.34.tar.xz +wget https://cdn.kernel.org/pub/linux/kernel/v5.x/linux-5.10.70.tar.xz + +tar axf binutils-2.37.tar.xz +tar axf gcc-11.2.0.tar.xz +tar axf glibc-2.34.tar.xz +tar axf linux-5.10.70.tar.xz + +mv binutils-2.37 src-binu +mv gcc-11.2.0 src-gcc +mv glibc-2.34 src-glibc +mv linux-5.10.70 src-linux + +mkdir -p bld-hdr bld-binu bld-gcc bld-glibc +mkdir -p ${CROSS_SYSROOT}/usr/include + +# +# Install kernel and glibc headers +# + +cd src-linux +make headers_install ARCH=${LINUX_ARCH} INSTALL_HDR_PATH=${CROSS_SYSROOT}/usr +cd .. + +cd bld-hdr +../src-glibc/configure --prefix=/usr --host=${TARGET} +make install-headers DESTDIR=${CROSS_SYSROOT} +touch ${CROSS_SYSROOT}/usr/include/gnu/stubs.h +cd .. + +# +# Build binutils +# + +cd bld-binu +../src-binu/configure --disable-werror \ + --prefix=${TOOLCHAIN_INSTALL} --with-sysroot --target=${TARGET} +make -j${J} +make install +cd .. + +# +# Build gcc, without shared libraries, because we do not yet +# have a shared libc against which to link. +# + +cd bld-gcc +../src-gcc/configure --disable-werror --disable-shared \ + --prefix=${TOOLCHAIN_INSTALL} --with-sysroot --target=${TARGET} \ + --enable-languages=c --disable-libssp --disable-libsanitizer \ + --disable-libatomic --disable-libgomp --disable-libquadmath +make -j${J} +make install +cd .. + +# +# Build glibc +# There are a few random things that use c++ but we didn't build that +# cross-compiler. We can get away without them. Disable CXX so that +# glibc doesn't try to use the host c++ compiler. +# + +cd bld-glibc +CXX=false ../src-glibc/configure --prefix=/usr --host=${TARGET} +make -j${j} +make install DESTDIR=${CROSS_SYSROOT} +cd .. 
diff --git a/tests/docker/dockerfiles/debian-riscv64-cross.docker b/tests/docker/dockerfiles/debian-riscv64-cross.docker index 2bbff19772..594d97982c 100644 --- a/tests/docker/dockerfiles/debian-riscv64-cross.docker +++ b/tests/docker/dockerfiles/debian-riscv64-cross.docker @@ -1,12 +1,48 @@ # -# Docker cross-compiler target +# Docker cross-compiler target for riscv64 # -# This docker target builds on the debian Buster base image. +# Currently the only distro that gets close to cross compiling riscv64 +# images is Debian Sid (with unofficial ports). As this is a moving +# target we keep the library list minimal and are aiming to migrate +# from this hack as soon as we are able. # -FROM qemu/debian10 +FROM docker.io/library/debian:sid-slim + +# Add ports +RUN apt update && \ +    DEBIAN_FRONTEND=noninteractive apt install -yy eatmydata && \ +    DEBIAN_FRONTEND=noninteractive eatmydata apt update -yy && \ +    DEBIAN_FRONTEND=noninteractive eatmydata apt upgrade -yy + +# Install common build utilities +RUN DEBIAN_FRONTEND=noninteractive eatmydata apt install -yy \ +        bc \ +        build-essential \ +        ca-certificates \ +        debian-ports-archive-keyring \ +        dpkg-dev \ +        gettext \ +        git \ +        ninja-build \ +        pkg-config \ +        python3 + +# Add ports and riscv64 architecture +RUN echo "deb http://ftp.ports.debian.org/debian-ports/ sid main" >> /etc/apt/sources.list +RUN dpkg --add-architecture riscv64 + +# Duplicate deb line as deb-src +RUN cat /etc/apt/sources.list | sed "s/^deb\ /deb-src /" >> /etc/apt/sources.list RUN apt update && \ DEBIAN_FRONTEND=noninteractive eatmydata \ apt install -y --no-install-recommends \ -        gcc-riscv64-linux-gnu \ -        libc6-dev-riscv64-cross +        gcc-riscv64-linux-gnu \ +        libc6-dev-riscv64-cross \ +        libffi-dev:riscv64 \ +        libglib2.0-dev:riscv64 \ +        libpixman-1-dev:riscv64 + +# Specify the cross prefix for this image (see tests/docker/common.rc) +ENV QEMU_CONFIGURE_OPTS --cross-prefix=riscv64-linux-gnu- +ENV DEF_TARGET_LIST riscv64-softmmu,riscv64-linux-user diff --git a/tests/docker/dockerfiles/debian-toolchain.docker b/tests/docker/dockerfiles/debian-toolchain.docker new file mode 100644 index 0000000000..738d808aa6 --- /dev/null +++ b/tests/docker/dockerfiles/debian-toolchain.docker @@ -0,0 +1,36 @@ +# +# Docker toolchain cross-compiler +# +# This dockerfile is used for building a cross-compiler toolchain. +# The script for building the toolchain is supplied via extra-files. +# +FROM qemu/debian10 + +# Install build utilities for building gcc and glibc. +# ??? The build-dep isn't working, missing a number of +# minimal build dependencies, e.g. libmpc. + +RUN apt update && \ +    DEBIAN_FRONTEND=noninteractive apt install -yy eatmydata && \ +    DEBIAN_FRONTEND=noninteractive eatmydata \ +    apt install -y --no-install-recommends \ +        bison \ +        flex \ +        gawk \ +        libmpc-dev \ +        libmpfr-dev \ +        rsync \ +        texinfo \ +        wget && \ +    DEBIAN_FRONTEND=noninteractive eatmydata \ +    apt build-dep -yy --arch-only gcc glibc + +ADD build-toolchain.sh /root/build-toolchain.sh + +RUN cd /root && ./build-toolchain.sh + +# Throw away the extra toolchain build deps, the downloaded source, +# and the build trees by restoring the original debian10 image, +# then copying the built toolchain from stage 0.
+FROM qemu/debian10 +COPY --from=0 /usr/local /usr/local diff --git a/tests/docker/dockerfiles/fedora-i386-cross.docker b/tests/docker/dockerfiles/fedora-i386-cross.docker index dbb8195eb1..f62a71ce22 100644 --- a/tests/docker/dockerfiles/fedora-i386-cross.docker +++ b/tests/docker/dockerfiles/fedora-i386-cross.docker @@ -1,4 +1,5 @@ -FROM registry.fedoraproject.org/fedora:33 +FROM registry.fedoraproject.org/fedora:34 + ENV PACKAGES \ bzip2 \ ccache \ @@ -17,12 +18,14 @@ ENV PACKAGES \ glibc-static.i686 \ gnutls-devel.i686 \ nettle-devel.i686 \ + pcre-devel.i686 \ perl-Test-Harness \ pixman-devel.i686 \ + sysprof-capture-devel.i686 \ zlib-devel.i686 -ENV QEMU_CONFIGURE_OPTS --extra-cflags=-m32 --disable-vhost-user -ENV PKG_CONFIG_PATH /usr/lib/pkgconfig +ENV QEMU_CONFIGURE_OPTS --cpu=i386 --disable-vhost-user +ENV PKG_CONFIG_LIBDIR /usr/lib/pkgconfig -RUN dnf install -y $PACKAGES +RUN dnf update -y && dnf install -y $PACKAGES RUN rpm -q $PACKAGES | sort > /packages.txt diff --git a/tests/meson.build b/tests/meson.build index 55a7b08275..3f3882748a 100644 --- a/tests/meson.build +++ b/tests/meson.build @@ -67,10 +67,6 @@ if have_tools and 'CONFIG_VHOST_USER' in config_host and 'CONFIG_LINUX' in confi dependencies: [qemuutil, vhost_user]) endif -if have_system and 'CONFIG_POSIX' in config_host - subdir('qemu-iotests') -endif - test('decodetree', sh, args: [ files('decode/check.sh'), config_host['PYTHON'], files('../scripts/decodetree.py') ], workdir: meson.current_source_dir() / 'decode', diff --git a/tests/plugin/insn.c b/tests/plugin/insn.c index 0f6a1938c1..d229fdc001 100644 --- a/tests/plugin/insn.c +++ b/tests/plugin/insn.c @@ -18,6 +18,8 @@ QEMU_PLUGIN_EXPORT int qemu_plugin_version = QEMU_PLUGIN_VERSION; static uint64_t insn_count; static bool do_inline; +static bool do_size; +static GArray *sizes; static void vcpu_insn_exec_before(unsigned int cpu_index, void *udata) { @@ -49,13 +51,35 @@ static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb) insn, vcpu_insn_exec_before, QEMU_PLUGIN_CB_NO_REGS, GUINT_TO_POINTER(vaddr)); } + + if (do_size) { + size_t sz = qemu_plugin_insn_size(insn); + if (sz > sizes->len) { + g_array_set_size(sizes, sz); + } + unsigned long *cnt = &g_array_index(sizes, unsigned long, sz); + (*cnt)++; + } } } static void plugin_exit(qemu_plugin_id_t id, void *p) { - g_autofree gchar *out = g_strdup_printf("insns: %" PRIu64 "\n", insn_count); - qemu_plugin_outs(out); + g_autoptr(GString) out = g_string_new(NULL); + + if (do_size) { + int i; + for (i = 0; i <= sizes->len; i++) { + unsigned long *cnt = &g_array_index(sizes, unsigned long, i); + if (*cnt) { + g_string_append_printf(out, + "len %d bytes: %ld insns\n", i, *cnt); + } + } + } else { + g_string_append_printf(out, "insns: %" PRIu64 "\n", insn_count); + } + qemu_plugin_outs(out->str); } QEMU_PLUGIN_EXPORT int qemu_plugin_install(qemu_plugin_id_t id, @@ -70,12 +94,21 @@ QEMU_PLUGIN_EXPORT int qemu_plugin_install(qemu_plugin_id_t id, fprintf(stderr, "boolean argument parsing failed: %s\n", opt); return -1; } + } else if (g_strcmp0(tokens[0], "sizes") == 0) { + if (!qemu_plugin_bool_parse(tokens[0], tokens[1], &do_size)) { + fprintf(stderr, "boolean argument parsing failed: %s\n", opt); + return -1; + } } else { fprintf(stderr, "option parsing failed: %s\n", opt); return -1; } } + if (do_size) { + sizes = g_array_new(true, true, sizeof(unsigned long)); + } + qemu_plugin_register_vcpu_tb_trans_cb(id, vcpu_tb_trans); qemu_plugin_register_atexit_cb(id, plugin_exit, NULL); return 0; diff --git 
a/tests/qapi-schema/doc-bad-feature.err b/tests/qapi-schema/doc-bad-feature.err index e4c62adfa3..49d1746c3d 100644 --- a/tests/qapi-schema/doc-bad-feature.err +++ b/tests/qapi-schema/doc-bad-feature.err @@ -1 +1 @@ -doc-bad-feature.json:3: documented member 'a' does not exist +doc-bad-feature.json:3: documented feature 'a' does not exist diff --git a/tests/qapi-schema/doc-empty-symbol.err b/tests/qapi-schema/doc-empty-symbol.err index 81b90e882a..aa51be41b2 100644 --- a/tests/qapi-schema/doc-empty-symbol.err +++ b/tests/qapi-schema/doc-empty-symbol.err @@ -1 +1 @@ -doc-empty-symbol.json:4:1: invalid name +doc-empty-symbol.json:4:1: name required after '@' diff --git a/tests/qapi-schema/doc-good.json b/tests/qapi-schema/doc-good.json index a20acffd8b..74745fb405 100644 --- a/tests/qapi-schema/doc-good.json +++ b/tests/qapi-schema/doc-good.json @@ -53,20 +53,25 @@ ## # @Enum: +# # @one: The _one_ {and only} # # Features: # @enum-feat: Also _one_ {and only} +# @enum-member-feat: a member feature # # @two is undocumented ## { 'enum': 'Enum', - 'data': [ { 'name': 'one', 'if': 'IFONE' }, 'two' ], + 'data': [ { 'name': 'one', 'if': 'IFONE', + 'features': [ 'enum-member-feat' ] }, + 'two' ], 'features': [ 'enum-feat' ], 'if': 'IFCOND' } ## # @Base: +# # @base1: # the first member ## @@ -75,6 +80,7 @@ ## # @Variant1: +# # A paragraph # # Another paragraph (but no @var: line) @@ -91,11 +97,13 @@ ## # @Variant2: +# ## { 'struct': 'Variant2', 'data': {} } ## # @Object: +# # Features: # @union-feat1: a feature ## @@ -109,6 +117,7 @@ ## # @Alternate: +# # @i: an integer # @b is undocumented # @@ -126,6 +135,7 @@ ## # @cmd: +# # @arg1: the first argument # # @arg2: the second @@ -175,6 +185,7 @@ ## # @EVT_BOXED: +# # Features: # @feat3: a feature ## diff --git a/tests/qapi-schema/doc-good.out b/tests/qapi-schema/doc-good.out index 5a324e2627..9dd65b9d92 100644 --- a/tests/qapi-schema/doc-good.out +++ b/tests/qapi-schema/doc-good.out @@ -13,6 +13,7 @@ module doc-good.json enum Enum member one if IFONE + feature enum-member-feat member two if IFCOND feature enum-feat @@ -108,6 +109,8 @@ The _one_ {and only} feature=enum-feat Also _one_ {and only} + feature=enum-member-feat +a member feature section=None @two is undocumented doc symbol=Base diff --git a/tests/qapi-schema/doc-good.txt b/tests/qapi-schema/doc-good.txt index 701402ee5e..b3b76bd43f 100644 --- a/tests/qapi-schema/doc-good.txt +++ b/tests/qapi-schema/doc-good.txt @@ -56,6 +56,9 @@ Features "enum-feat" Also _one_ {and only} +"enum-member-feat" + a member feature + "two" is undocumented diff --git a/tests/qapi-schema/enum-dict-member-unknown.err b/tests/qapi-schema/enum-dict-member-unknown.err index f8617ea179..235cde0c49 100644 --- a/tests/qapi-schema/enum-dict-member-unknown.err +++ b/tests/qapi-schema/enum-dict-member-unknown.err @@ -1,3 +1,3 @@ enum-dict-member-unknown.json: In enum 'MyEnum': enum-dict-member-unknown.json:2: 'data' member has unknown key 'bad-key' -Valid keys are 'if', 'name'. +Valid keys are 'features', 'if', 'name'. 
diff --git a/tests/qapi-schema/meson.build b/tests/qapi-schema/meson.build index 6187efbd58..caf0791ba8 100644 --- a/tests/qapi-schema/meson.build +++ b/tests/qapi-schema/meson.build @@ -1,5 +1,5 @@ test_env = environment() -test_env.set('PYTHONPATH', meson.source_root() / 'scripts') +test_env.set('PYTHONPATH', meson.project_source_root() / 'scripts') test_env.set('PYTHONIOENCODING', 'utf-8') schemas = [ @@ -241,15 +241,17 @@ if build_docs output: ['doc-good.txt'], input: files('doc-good.json', 'doc-good.rst'), build_by_default: true, - depend_files: sphinx_extn_depends, + depfile: 'docs.d', # We use -E to suppress Sphinx's caching, because # we want it to always really run the QAPI doc # generation code. It also means we don't # clutter up the build dir with the cache. command: [SPHINX_ARGS, '-b', 'text', '-E', - '-c', meson.source_root() / 'docs', + '-c', meson.project_source_root() / 'docs', '-D', 'master_doc=doc-good', + '-Ddepfile=@DEPFILE@', + '-Ddepfile_stamp=doc-good.stamp', meson.current_source_dir(), meson.current_build_dir()]) diff --git a/tests/qapi-schema/qapi-schema-test.json b/tests/qapi-schema/qapi-schema-test.json index 2ec50109cb..43b8697002 100644 --- a/tests/qapi-schema/qapi-schema-test.json +++ b/tests/qapi-schema/qapi-schema-test.json @@ -273,7 +273,7 @@ 'data': { 'foo': { 'type': 'int', 'features': [ 'deprecated' ] } }, 'features': [ 'feature1' ] } { 'struct': 'FeatureStruct2', - 'data': { 'foo': 'int' }, + 'data': { 'foo': { 'type': 'int', 'features': [ 'unstable' ] } }, 'features': [ { 'name': 'feature1' } ] } { 'struct': 'FeatureStruct3', 'data': { 'foo': 'int' }, @@ -301,7 +301,8 @@ 'TEST_IF_COND_2'] } } ] } { 'enum': 'FeatureEnum1', - 'data': [ 'eins', 'zwei', 'drei' ], + 'data': [ 'eins', 'zwei', + { 'name': 'drei', 'features': [ 'deprecated' ] } ], 'features': [ 'feature1' ] } { 'union': 'FeatureUnion1', @@ -330,7 +331,7 @@ { 'command': 'test-command-features1', 'features': [ 'deprecated' ] } { 'command': 'test-command-features3', - 'features': [ 'feature1', 'feature2' ] } + 'features': [ 'unstable', 'feature1', 'feature2' ] } { 'command': 'test-command-cond-features1', 'features': [ { 'name': 'feature1', 'if': 'TEST_IF_FEATURE_1'} ] } @@ -347,3 +348,6 @@ { 'event': 'TEST_EVENT_FEATURES1', 'features': [ 'deprecated' ] } + +{ 'event': 'TEST_EVENT_FEATURES2', + 'features': [ 'unstable' ] } diff --git a/tests/qapi-schema/qapi-schema-test.out b/tests/qapi-schema/qapi-schema-test.out index 9337adc9ea..1f9585fa9b 100644 --- a/tests/qapi-schema/qapi-schema-test.out +++ b/tests/qapi-schema/qapi-schema-test.out @@ -308,6 +308,7 @@ object FeatureStruct1 feature feature1 object FeatureStruct2 member foo: int optional=False + feature unstable feature feature1 object FeatureStruct3 member foo: int optional=False @@ -341,6 +342,7 @@ enum FeatureEnum1 member eins member zwei member drei + feature deprecated feature feature1 object q_obj_FeatureUnion1-base member tag: FeatureEnum1 optional=False @@ -372,6 +374,7 @@ command test-command-features1 None -> None feature deprecated command test-command-features3 None -> None gen=True success_response=True boxed=False oob=False preconfig=False + feature unstable feature feature1 feature feature2 command test-command-cond-features1 None -> None @@ -393,6 +396,9 @@ event TEST_EVENT_FEATURES0 FeatureStruct1 event TEST_EVENT_FEATURES1 None boxed=False feature deprecated +event TEST_EVENT_FEATURES2 None + boxed=False + feature unstable module include/sub-module.json include sub-sub-module.json object SecondArrayRef diff --git 
a/tests/qapi-schema/test-qapi.py b/tests/qapi-schema/test-qapi.py index c717a7a90b..2160cef082 100755 --- a/tests/qapi-schema/test-qapi.py +++ b/tests/qapi-schema/test-qapi.py @@ -37,6 +37,7 @@ class QAPISchemaTestVisitor(QAPISchemaVisitor): for m in members: print(' member %s' % m.name) self._print_if(m.ifcond, indent=8) + self._print_features(m.features, indent=8) self._print_if(ifcond) self._print_features(features) diff --git a/tests/qemu-iotests/040 b/tests/qemu-iotests/040 index f3677de9df..6af5ab9e76 100755 --- a/tests/qemu-iotests/040 +++ b/tests/qemu-iotests/040 @@ -92,10 +92,9 @@ class TestSingleDrive(ImageCommitTestCase): self.vm.add_device('virtio-scsi') self.vm.add_device("scsi-hd,id=scsi0,drive=drive0") self.vm.launch() - self.has_quit = False def tearDown(self): - self.vm.shutdown(has_quit=self.has_quit) + self.vm.shutdown() os.remove(test_img) os.remove(mid_img) os.remove(backing_img) @@ -127,8 +126,6 @@ class TestSingleDrive(ImageCommitTestCase): result = self.vm.qmp('quit') self.assert_qmp(result, 'return', {}) - self.has_quit = True - # Same as above, but this time we add the filter after starting the job @iotests.skip_if_unsupported(['throttle']) def test_commit_plus_filter_and_quit(self): @@ -147,8 +144,6 @@ class TestSingleDrive(ImageCommitTestCase): result = self.vm.qmp('quit') self.assert_qmp(result, 'return', {}) - self.has_quit = True - def test_device_not_found(self): result = self.vm.qmp('block-commit', device='nonexistent', top='%s' % mid_img) self.assert_qmp(result, 'error/class', 'DeviceNotFound') diff --git a/tests/qemu-iotests/049.out b/tests/qemu-iotests/049.out index 01f7b1f240..8719c91b48 100644 --- a/tests/qemu-iotests/049.out +++ b/tests/qemu-iotests/049.out @@ -174,11 +174,11 @@ Formatting 'TEST_DIR/t.qcow2', fmt=qcow2 cluster_size=65536 extended_l2=off comp qemu-img create -f qcow2 -o compat=0.42 TEST_DIR/t.qcow2 64M Formatting 'TEST_DIR/t.qcow2', fmt=qcow2 cluster_size=65536 extended_l2=off compression_type=zlib size=67108864 compat=0.42 lazy_refcounts=off refcount_bits=16 -qemu-img: TEST_DIR/t.qcow2: Invalid parameter '0.42' +qemu-img: TEST_DIR/t.qcow2: Parameter 'version' does not accept value '0.42' qemu-img create -f qcow2 -o compat=foobar TEST_DIR/t.qcow2 64M Formatting 'TEST_DIR/t.qcow2', fmt=qcow2 cluster_size=65536 extended_l2=off compression_type=zlib size=67108864 compat=foobar lazy_refcounts=off refcount_bits=16 -qemu-img: TEST_DIR/t.qcow2: Invalid parameter 'foobar' +qemu-img: TEST_DIR/t.qcow2: Parameter 'version' does not accept value 'foobar' == Check preallocation option == @@ -190,7 +190,7 @@ Formatting 'TEST_DIR/t.qcow2', fmt=qcow2 cluster_size=65536 extended_l2=off prea qemu-img create -f qcow2 -o preallocation=1234 TEST_DIR/t.qcow2 64M Formatting 'TEST_DIR/t.qcow2', fmt=qcow2 cluster_size=65536 extended_l2=off preallocation=1234 compression_type=zlib size=67108864 lazy_refcounts=off refcount_bits=16 -qemu-img: TEST_DIR/t.qcow2: Invalid parameter '1234' +qemu-img: TEST_DIR/t.qcow2: Parameter 'preallocation' does not accept value '1234' == Check encryption option == diff --git a/tests/qemu-iotests/051 b/tests/qemu-iotests/051 index 7bf29343d7..1d2fa93a11 100755 --- a/tests/qemu-iotests/051 +++ b/tests/qemu-iotests/051 @@ -199,7 +199,7 @@ case "$QEMU_DEFAULT_MACHINE" in # virtio-blk enables the iothread only when the driver initialises the # device, so a second virtio-blk device can't be added even with the # same iothread. virtio-scsi allows this. 
- run_qemu $iothread -device virtio-blk-pci,drive=disk,iothread=iothread0,share-rw=on + run_qemu $iothread -device virtio-blk-pci,drive=disk,iothread=thread0,share-rw=on run_qemu $iothread -device virtio-scsi,id=virtio-scsi1,iothread=thread0 -device scsi-hd,bus=virtio-scsi1.0,drive=disk,share-rw=on ;; *) diff --git a/tests/qemu-iotests/051.pc.out b/tests/qemu-iotests/051.pc.out index afe7632964..063e4fc584 100644 --- a/tests/qemu-iotests/051.pc.out +++ b/tests/qemu-iotests/051.pc.out @@ -183,9 +183,9 @@ Testing: -drive file=TEST_DIR/t.qcow2,if=none,node-name=disk -object iothread,id QEMU X.Y.Z monitor - type 'help' for more information (qemu) QEMU_PROG: -device scsi-hd,bus=virtio-scsi1.0,drive=disk,share-rw=on: Cannot change iothread of active block backend -Testing: -drive file=TEST_DIR/t.qcow2,if=none,node-name=disk -object iothread,id=thread0 -device virtio-scsi,iothread=thread0,id=virtio-scsi0 -device scsi-hd,bus=virtio-scsi0.0,drive=disk,share-rw=on -device virtio-blk-pci,drive=disk,iothread=iothread0,share-rw=on +Testing: -drive file=TEST_DIR/t.qcow2,if=none,node-name=disk -object iothread,id=thread0 -device virtio-scsi,iothread=thread0,id=virtio-scsi0 -device scsi-hd,bus=virtio-scsi0.0,drive=disk,share-rw=on -device virtio-blk-pci,drive=disk,iothread=thread0,share-rw=on QEMU X.Y.Z monitor - type 'help' for more information -(qemu) QEMU_PROG: -device virtio-blk-pci,drive=disk,iothread=iothread0,share-rw=on: Cannot change iothread of active block backend +(qemu) QEMU_PROG: -device virtio-blk-pci,drive=disk,iothread=thread0,share-rw=on: Cannot change iothread of active block backend Testing: -drive file=TEST_DIR/t.qcow2,if=none,node-name=disk -object iothread,id=thread0 -device virtio-scsi,iothread=thread0,id=virtio-scsi0 -device scsi-hd,bus=virtio-scsi0.0,drive=disk,share-rw=on -device virtio-scsi,id=virtio-scsi1,iothread=thread0 -device scsi-hd,bus=virtio-scsi1.0,drive=disk,share-rw=on QEMU X.Y.Z monitor - type 'help' for more information diff --git a/tests/qemu-iotests/085 b/tests/qemu-iotests/085 index d557522943..de74262a26 100755 --- a/tests/qemu-iotests/085 +++ b/tests/qemu-iotests/085 @@ -103,11 +103,18 @@ do_blockdev_add() } # ${1}: unique identifier for the snapshot filename -add_snapshot_image() +create_snapshot_image() { base_image="${TEST_DIR}/$((${1}-1))-${snapshot_virt0}" snapshot_file="${TEST_DIR}/${1}-${snapshot_virt0}" TEST_IMG=$snapshot_file _make_test_img -u -b "${base_image}" -F $IMGFMT "$size" +} + +# ${1}: unique identifier for the snapshot filename +add_snapshot_image() +{ + snapshot_file="${TEST_DIR}/${1}-${snapshot_virt0}" + create_snapshot_image "$1" do_blockdev_add "$1" "'backing': null, " "${snapshot_file}" } @@ -230,6 +237,28 @@ _make_test_img -b "${TEST_IMG}.base" -F $IMGFMT "$size" do_blockdev_add ${SNAPSHOTS} "" "${TEST_IMG}" blockdev_snapshot ${SNAPSHOTS} error +echo +echo === Invalid command - creating loops === +echo + +SNAPSHOTS=$((${SNAPSHOTS}+1)) +add_snapshot_image ${SNAPSHOTS} + +_send_qemu_cmd $h "{ 'execute': 'blockdev-snapshot', + 'arguments': { 'node':'snap_${SNAPSHOTS}', + 'overlay':'snap_${SNAPSHOTS}' } + }" "error" + +SNAPSHOTS=$((${SNAPSHOTS}+1)) +create_snapshot_image ${SNAPSHOTS} +do_blockdev_add ${SNAPSHOTS} "'backing': 'snap_$((${SNAPSHOTS}-1))', " \ + "${TEST_DIR}/${SNAPSHOTS}-${snapshot_virt0}" + +_send_qemu_cmd $h "{ 'execute': 'blockdev-snapshot', + 'arguments': { 'node':'snap_${SNAPSHOTS}', + 'overlay':'snap_$((${SNAPSHOTS}-1))' } + }" "error" + echo echo === Invalid command - The node does not exist === echo diff --git 
a/tests/qemu-iotests/085.out b/tests/qemu-iotests/085.out index 1d4c565b6d..b543b992ff 100644 --- a/tests/qemu-iotests/085.out +++ b/tests/qemu-iotests/085.out @@ -217,15 +217,42 @@ Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=134217728 backing_file=TEST_DIR/ 'overlay':'snap_13' } } {"error": {"class": "GenericError", "desc": "The overlay already has a backing image"}} +=== Invalid command - creating loops === + +Formatting 'TEST_DIR/14-snapshot-v0.IMGFMT', fmt=IMGFMT size=134217728 backing_file=TEST_DIR/13-snapshot-v0.IMGFMT backing_fmt=IMGFMT +{ 'execute': 'blockdev-add', 'arguments': + { 'driver': 'IMGFMT', 'node-name': 'snap_14', 'backing': null, + 'file': + { 'driver': 'file', 'filename': 'TEST_DIR/14-snapshot-v0.IMGFMT', + 'node-name': 'file_14' } } } +{"return": {}} +{ 'execute': 'blockdev-snapshot', + 'arguments': { 'node':'snap_14', + 'overlay':'snap_14' } + } +{"error": {"class": "GenericError", "desc": "Making 'snap_14' a backing child of 'snap_14' would create a cycle"}} +Formatting 'TEST_DIR/15-snapshot-v0.IMGFMT', fmt=IMGFMT size=134217728 backing_file=TEST_DIR/14-snapshot-v0.IMGFMT backing_fmt=IMGFMT +{ 'execute': 'blockdev-add', 'arguments': + { 'driver': 'IMGFMT', 'node-name': 'snap_15', 'backing': 'snap_14', + 'file': + { 'driver': 'file', 'filename': 'TEST_DIR/15-snapshot-v0.IMGFMT', + 'node-name': 'file_15' } } } +{"return": {}} +{ 'execute': 'blockdev-snapshot', + 'arguments': { 'node':'snap_15', + 'overlay':'snap_14' } + } +{"error": {"class": "GenericError", "desc": "Making 'snap_15' a backing child of 'snap_14' would create a cycle"}} + === Invalid command - The node does not exist === { 'execute': 'blockdev-snapshot', 'arguments': { 'node': 'virtio0', - 'overlay':'snap_14' } } -{"error": {"class": "GenericError", "desc": "Cannot find device='snap_14' nor node-name='snap_14'"}} + 'overlay':'snap_16' } } +{"error": {"class": "GenericError", "desc": "Cannot find device='snap_16' nor node-name='snap_16'"}} { 'execute': 'blockdev-snapshot', 'arguments': { 'node':'nodevice', - 'overlay':'snap_13' } + 'overlay':'snap_15' } } {"error": {"class": "GenericError", "desc": "Cannot find device='nodevice' nor node-name='nodevice'"}} *** done diff --git a/tests/qemu-iotests/109.out b/tests/qemu-iotests/109.out index 8f839b4b7f..e29280015e 100644 --- a/tests/qemu-iotests/109.out +++ b/tests/qemu-iotests/109.out @@ -44,9 +44,8 @@ read 512/512 bytes at offset 0 {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false, "reason": "host-qmp-quit"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "standby", "id": "src"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "ready", "id": "src"}} -{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "src"}} -{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "src"}} -{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 1024, "offset": 1024, "speed": 0, "type": "mirror"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "aborting", "id": "src"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_CANCELLED", "data": {"device": 
"src", "len": 1024, "offset": 1024, "speed": 0, "type": "mirror"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "src"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "src"}} Images are identical. @@ -95,9 +94,8 @@ read 512/512 bytes at offset 0 {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false, "reason": "host-qmp-quit"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "standby", "id": "src"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "ready", "id": "src"}} -{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "src"}} -{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "src"}} -{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 197120, "offset": 197120, "speed": 0, "type": "mirror"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "aborting", "id": "src"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_CANCELLED", "data": {"device": "src", "len": 197120, "offset": 197120, "speed": 0, "type": "mirror"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "src"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "src"}} Images are identical. @@ -146,9 +144,8 @@ read 512/512 bytes at offset 0 {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false, "reason": "host-qmp-quit"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "standby", "id": "src"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "ready", "id": "src"}} -{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "src"}} -{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "src"}} -{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 327680, "offset": 327680, "speed": 0, "type": "mirror"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "aborting", "id": "src"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_CANCELLED", "data": {"device": "src", "len": 327680, "offset": 327680, "speed": 0, "type": "mirror"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "src"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "src"}} Images are identical. 
@@ -197,9 +194,8 @@ read 512/512 bytes at offset 0 {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false, "reason": "host-qmp-quit"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "standby", "id": "src"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "ready", "id": "src"}} -{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "src"}} -{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "src"}} -{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 1024, "offset": 1024, "speed": 0, "type": "mirror"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "aborting", "id": "src"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_CANCELLED", "data": {"device": "src", "len": 1024, "offset": 1024, "speed": 0, "type": "mirror"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "src"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "src"}} Images are identical. @@ -248,9 +244,8 @@ read 512/512 bytes at offset 0 {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false, "reason": "host-qmp-quit"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "standby", "id": "src"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "ready", "id": "src"}} -{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "src"}} -{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "src"}} -{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 65536, "offset": 65536, "speed": 0, "type": "mirror"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "aborting", "id": "src"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_CANCELLED", "data": {"device": "src", "len": 65536, "offset": 65536, "speed": 0, "type": "mirror"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "src"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "src"}} Images are identical. 
@@ -299,9 +294,8 @@ read 512/512 bytes at offset 0 {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false, "reason": "host-qmp-quit"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "standby", "id": "src"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "ready", "id": "src"}} -{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "src"}} -{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "src"}} -{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 2560, "offset": 2560, "speed": 0, "type": "mirror"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "aborting", "id": "src"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_CANCELLED", "data": {"device": "src", "len": 2560, "offset": 2560, "speed": 0, "type": "mirror"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "src"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "src"}} Images are identical. @@ -349,9 +343,8 @@ read 512/512 bytes at offset 0 {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false, "reason": "host-qmp-quit"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "standby", "id": "src"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "ready", "id": "src"}} -{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "src"}} -{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "src"}} -{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 2560, "offset": 2560, "speed": 0, "type": "mirror"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "aborting", "id": "src"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_CANCELLED", "data": {"device": "src", "len": 2560, "offset": 2560, "speed": 0, "type": "mirror"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "src"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "src"}} Images are identical. 
@@ -399,9 +392,8 @@ read 512/512 bytes at offset 0 {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false, "reason": "host-qmp-quit"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "standby", "id": "src"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "ready", "id": "src"}} -{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "src"}} -{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "src"}} -{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 31457280, "offset": 31457280, "speed": 0, "type": "mirror"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "aborting", "id": "src"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_CANCELLED", "data": {"device": "src", "len": 31457280, "offset": 31457280, "speed": 0, "type": "mirror"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "src"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "src"}} Images are identical. @@ -449,9 +441,8 @@ read 512/512 bytes at offset 0 {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false, "reason": "host-qmp-quit"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "standby", "id": "src"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "ready", "id": "src"}} -{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "src"}} -{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "src"}} -{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 327680, "offset": 327680, "speed": 0, "type": "mirror"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "aborting", "id": "src"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_CANCELLED", "data": {"device": "src", "len": 327680, "offset": 327680, "speed": 0, "type": "mirror"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "src"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "src"}} Images are identical. 
@@ -499,9 +490,8 @@ read 512/512 bytes at offset 0 {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false, "reason": "host-qmp-quit"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "standby", "id": "src"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "ready", "id": "src"}} -{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "src"}} -{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "src"}} -{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 2048, "offset": 2048, "speed": 0, "type": "mirror"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "aborting", "id": "src"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_CANCELLED", "data": {"device": "src", "len": 2048, "offset": 2048, "speed": 0, "type": "mirror"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "src"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "src"}} Images are identical. @@ -529,9 +519,8 @@ WARNING: Image format was not specified for 'TEST_DIR/t.raw' and probing guessed {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false, "reason": "host-qmp-quit"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "standby", "id": "src"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "ready", "id": "src"}} -{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "src"}} -{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "src"}} -{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 512, "offset": 512, "speed": 0, "type": "mirror"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "aborting", "id": "src"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_CANCELLED", "data": {"device": "src", "len": 512, "offset": 512, "speed": 0, "type": "mirror"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "src"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "src"}} Images are identical. @@ -552,9 +541,8 @@ Images are identical. 
{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false, "reason": "host-qmp-quit"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "standby", "id": "src"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "ready", "id": "src"}} -{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "waiting", "id": "src"}} -{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "pending", "id": "src"}} -{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "src", "len": 512, "offset": 512, "speed": 0, "type": "mirror"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "aborting", "id": "src"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_CANCELLED", "data": {"device": "src", "len": 512, "offset": 512, "speed": 0, "type": "mirror"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "concluded", "id": "src"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "null", "id": "src"}} Images are identical. diff --git a/tests/qemu-iotests/129 b/tests/qemu-iotests/129 index 5251e2669e..c75ec62ecf 100755 --- a/tests/qemu-iotests/129 +++ b/tests/qemu-iotests/129 @@ -77,8 +77,8 @@ class TestStopWithBlockJob(iotests.QMPTestCase): self.do_test_stop("drive-backup", device="drive0", target=self.target_img, format=iotests.imgfmt, sync="full", - x_perf={ 'max-chunk': 65536, - 'max-workers': 8 }) + x_perf={'max-chunk': 65536, + 'max-workers': 8}) def test_block_commit(self): # Add overlay above the source node so that we actually use a @@ -88,13 +88,13 @@ class TestStopWithBlockJob(iotests.QMPTestCase): '1G') result = self.vm.qmp('blockdev-add', **{ - 'node-name': 'overlay', - 'driver': iotests.imgfmt, - 'file': { - 'driver': 'file', - 'filename': self.overlay_img - } - }) + 'node-name': 'overlay', + 'driver': iotests.imgfmt, + 'file': { + 'driver': 'file', + 'filename': self.overlay_img + } + }) self.assert_qmp(result, 'return', {}) result = self.vm.qmp('blockdev-snapshot', diff --git a/tests/qemu-iotests/206.out b/tests/qemu-iotests/206.out index b68c443867..3593e8e9c2 100644 --- a/tests/qemu-iotests/206.out +++ b/tests/qemu-iotests/206.out @@ -192,7 +192,7 @@ Job failed: Could not resize image: Failed to grow the L1 table: File too large === Invalid version === {"execute": "blockdev-create", "arguments": {"job-id": "job0", "options": {"driver": "qcow2", "file": "node0", "size": 67108864, "version": "v1"}}} -{"error": {"class": "GenericError", "desc": "Invalid parameter 'v1'"}} +{"error": {"class": "GenericError", "desc": "Parameter 'version' does not accept value 'v1'"}} {"execute": "blockdev-create", "arguments": {"job-id": "job0", "options": {"driver": "qcow2", "file": "node0", "lazy-refcounts": true, "size": 67108864, "version": "v2"}}} {"return": {}} diff --git a/tests/qemu-iotests/218 b/tests/qemu-iotests/218 index 325d8244fb..4922b4d3b6 100755 --- a/tests/qemu-iotests/218 +++ b/tests/qemu-iotests/218 @@ -187,4 +187,4 @@ with iotests.VM() as vm, \ log(vm.qmp('quit')) with iotests.Timeout(5, 'Timeout waiting for VM to quit'): 
- vm.shutdown(has_quit=True) + vm.shutdown() diff --git a/tests/qemu-iotests/235 b/tests/qemu-iotests/235 index 8aed45f9a7..4de920c380 100755 --- a/tests/qemu-iotests/235 +++ b/tests/qemu-iotests/235 @@ -24,8 +24,6 @@ import os import iotests from iotests import qemu_img_create, qemu_io, file_path, log -sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..', 'python')) - from qemu.machine import QEMUMachine iotests.script_initialize(supported_fmts=['qcow2']) diff --git a/tests/qemu-iotests/237.out b/tests/qemu-iotests/237.out index aa94986803..2f09ff5512 100644 --- a/tests/qemu-iotests/237.out +++ b/tests/qemu-iotests/237.out @@ -116,13 +116,13 @@ Job failed: Cannot find device='this doesn't exist' nor node-name='this doesn't == Invalid adapter types == {"execute": "blockdev-create", "arguments": {"job-id": "job0", "options": {"adapter-type": "foo", "driver": "vmdk", "file": "node0", "size": 33554432}}} -{"error": {"class": "GenericError", "desc": "Invalid parameter 'foo'"}} +{"error": {"class": "GenericError", "desc": "Parameter 'adapter-type' does not accept value 'foo'"}} {"execute": "blockdev-create", "arguments": {"job-id": "job0", "options": {"adapter-type": "IDE", "driver": "vmdk", "file": "node0", "size": 33554432}}} -{"error": {"class": "GenericError", "desc": "Invalid parameter 'IDE'"}} +{"error": {"class": "GenericError", "desc": "Parameter 'adapter-type' does not accept value 'IDE'"}} {"execute": "blockdev-create", "arguments": {"job-id": "job0", "options": {"adapter-type": "legacyesx", "driver": "vmdk", "file": "node0", "size": 33554432}}} -{"error": {"class": "GenericError", "desc": "Invalid parameter 'legacyesx'"}} +{"error": {"class": "GenericError", "desc": "Parameter 'adapter-type' does not accept value 'legacyesx'"}} {"execute": "blockdev-create", "arguments": {"job-id": "job0", "options": {"adapter-type": 1, "driver": "vmdk", "file": "node0", "size": 33554432}}} {"error": {"class": "GenericError", "desc": "Invalid parameter type for 'options.adapter-type', expected: string"}} diff --git a/tests/qemu-iotests/245 b/tests/qemu-iotests/245 index bf8261eec0..24ac43f70e 100755 --- a/tests/qemu-iotests/245 +++ b/tests/qemu-iotests/245 @@ -149,7 +149,7 @@ class TestBlockdevReopen(iotests.QMPTestCase): self.reopen(opts, {'node-name': ''}, "Failed to find node with node-name=''") self.reopen(opts, {'node-name': None}, "Invalid parameter type for 'options[0].node-name', expected: string") self.reopen(opts, {'driver': 'raw'}, "Cannot change the option 'driver'") - self.reopen(opts, {'driver': ''}, "Invalid parameter ''") + self.reopen(opts, {'driver': ''}, "Parameter 'driver' does not accept value ''") self.reopen(opts, {'driver': None}, "Invalid parameter type for 'options[0].driver', expected: string") self.reopen(opts, {'file': 'not-found'}, "Cannot find device='' nor node-name='not-found'") self.reopen(opts, {'file': ''}, "Cannot find device='' nor node-name=''") @@ -1189,10 +1189,10 @@ class TestBlockdevReopen(iotests.QMPTestCase): self.run_test_iothreads('iothread0', 'iothread0') def test_iothreads_switch_backing(self): - self.run_test_iothreads('iothread0', None) + self.run_test_iothreads('iothread0', '') def test_iothreads_switch_overlay(self): - self.run_test_iothreads(None, 'iothread0') + self.run_test_iothreads('', 'iothread0') if __name__ == '__main__': iotests.activate_logging() diff --git a/tests/qemu-iotests/255 b/tests/qemu-iotests/255 index c43aa9c67a..3d6d0e80cb 100755 --- a/tests/qemu-iotests/255 +++ b/tests/qemu-iotests/255 @@ -123,4 +123,4 @@ with 
iotests.FilePath('src.qcow2') as src_path, \ vm.qmp_log('block-job-cancel', device='job0') vm.qmp_log('quit') - vm.shutdown(has_quit=True) + vm.shutdown() diff --git a/tests/qemu-iotests/287 b/tests/qemu-iotests/287 index 22ce9ff0e4..2d5334e8bf 100755 --- a/tests/qemu-iotests/287 +++ b/tests/qemu-iotests/287 @@ -53,7 +53,7 @@ CLUSTER_SIZE=65536 # Check if we can run this test. output=$(_make_test_img -o 'compression_type=zstd' 64M; _cleanup_test_img) -if echo "$output" | grep -q "Invalid parameter 'zstd'"; then +if echo "$output" | grep -q "Parameter 'compression-type' does not accept value 'zstd'"; then _notrun "ZSTD is disabled" fi diff --git a/tests/qemu-iotests/297 b/tests/qemu-iotests/297 index b04cba5366..ee78a62735 100755 --- a/tests/qemu-iotests/297 +++ b/tests/qemu-iotests/297 @@ -17,99 +17,66 @@ # along with this program. If not, see . import os -import re -import shutil import subprocess import sys +from typing import List import iotests +import linters -# TODO: Empty this list! -SKIP_FILES = ( - '030', '040', '041', '044', '045', '055', '056', '057', '065', '093', - '096', '118', '124', '132', '136', '139', '147', '148', '149', - '151', '152', '155', '163', '165', '194', '196', '202', - '203', '205', '206', '207', '208', '210', '211', '212', '213', '216', - '218', '219', '224', '228', '234', '235', '236', '237', '238', - '240', '242', '245', '246', '248', '255', '256', '257', '258', '260', - '262', '264', '266', '274', '277', '280', '281', '295', '296', '298', - '299', '302', '303', '304', '307', - 'nbd-fault-injector.py', 'qcow2.py', 'qcow2_format.py', 'qed.py' -) +# Looking for something? +# +# List of files to exclude from linting: linters.py +# mypy configuration: mypy.ini +# pylint configuration: pylintrc -def is_python_file(filename): - if not os.path.isfile(filename): +def check_linter(linter: str) -> bool: + try: + linters.run_linter(linter, ['--version'], suppress_output=True) + except subprocess.CalledProcessError: + iotests.case_notrun(f"'{linter}' not found") return False - - if filename.endswith('.py'): - return True - - with open(filename, encoding='utf-8') as f: - try: - first_line = f.readline() - return re.match('^#!.*python', first_line) is not None - except UnicodeDecodeError: # Ignore binary files - return False + return True -def run_linters(): - named_tests = [f'tests/{entry}' for entry in os.listdir('tests')] - check_tests = set(os.listdir('.') + named_tests) - set(SKIP_FILES) - files = [filename for filename in check_tests if is_python_file(filename)] +def test_pylint(files: List[str]) -> None: + print('=== pylint ===') + sys.stdout.flush() + + if not check_linter('pylint'): + return + + linters.run_linter('pylint', files) + + +def test_mypy(files: List[str]) -> None: + print('=== mypy ===') + sys.stdout.flush() + + if not check_linter('mypy'): + return + + env = os.environ.copy() + env['MYPYPATH'] = env['PYTHONPATH'] + + linters.run_linter('mypy', files, env=env, suppress_output=True) + + +def main() -> None: + files = linters.get_test_files() iotests.logger.debug('Files to be checked:') iotests.logger.debug(', '.join(sorted(files))) - print('=== pylint ===') - sys.stdout.flush() - - # Todo notes are fine, but fixme's or xxx's should probably just be - # fixed (in tests, at least) - env = os.environ.copy() - qemu_module_path = os.path.join(os.path.dirname(__file__), - '..', '..', 'python') - try: - env['PYTHONPATH'] += os.pathsep + qemu_module_path - except KeyError: - env['PYTHONPATH'] = qemu_module_path - subprocess.run(('pylint-3', '--score=n', 
'--notes=FIXME,XXX', *files), - env=env, check=False) - - print('=== mypy ===') - sys.stdout.flush() - - # We have to call mypy separately for each file. Otherwise, it - # will interpret all given files as belonging together (i.e., they - # may not both define the same classes, etc.; most notably, they - # must not both define the __main__ module). - env['MYPYPATH'] = env['PYTHONPATH'] - for filename in files: - p = subprocess.run(('mypy', - '--warn-unused-configs', - '--disallow-subclassing-any', - '--disallow-any-generics', - '--disallow-incomplete-defs', - '--disallow-untyped-decorators', - '--no-implicit-optional', - '--warn-redundant-casts', - '--warn-unused-ignores', - '--no-implicit-reexport', - '--namespace-packages', - filename), - env=env, - check=False, - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - universal_newlines=True) - - if p.returncode != 0: - print(p.stdout) + for test in (test_pylint, test_mypy): + try: + test(files) + except subprocess.CalledProcessError as exc: + # Linter failure will be caught by diffing the IO. + if exc.output: + print(exc.output) -for linter in ('pylint-3', 'mypy'): - if shutil.which(linter) is None: - iotests.notrun(f'{linter} not found') - -iotests.script_main(run_linters) +iotests.script_main(main) diff --git a/tests/qemu-iotests/300 b/tests/qemu-iotests/300 index fe94de84ed..dbd28384ec 100755 --- a/tests/qemu-iotests/300 +++ b/tests/qemu-iotests/300 @@ -26,9 +26,6 @@ from typing import Dict, List, Optional import iotests -# Import qemu after iotests.py has amended sys.path -# pylint: disable=wrong-import-order -from qemu.machine import machine BlockBitmapMapping = List[Dict[str, object]] @@ -462,12 +459,11 @@ class TestBlockBitmapMappingErrors(TestDirtyBitmapMigration): f"'{self.src_node_name}': Name is longer than 255 bytes", log) - # Expect abnormal shutdown of the destination VM because of - # the failed migration - try: - self.vm_b.shutdown() - except machine.AbnormalShutdown: - pass + # Destination VM will terminate w/ error of its own accord + # due to the failed migration. 
+ self.vm_b.wait() + rc = self.vm_b.exitcode() + assert rc is not None and rc > 0 def test_aliased_bitmap_name_too_long(self) -> None: # Longer than the maximum for bitmap names diff --git a/tests/qemu-iotests/308 b/tests/qemu-iotests/308 index 6b386bd523..2e3f8f4282 100755 --- a/tests/qemu-iotests/308 +++ b/tests/qemu-iotests/308 @@ -148,7 +148,7 @@ rmdir "$EXT_MP" 2>/dev/null rm -f "$EXT_MP" output=$(fuse_export_add 'export-err' "'mountpoint': '$EXT_MP'" error) -if echo "$output" | grep -q "Invalid parameter 'fuse'"; then +if echo "$output" | grep -q "Parameter 'type' does not accept value 'fuse'"; then _notrun 'No FUSE support' fi diff --git a/tests/qemu-iotests/310 b/tests/qemu-iotests/310 index 9d9c928c4b..33c3411869 100755 --- a/tests/qemu-iotests/310 +++ b/tests/qemu-iotests/310 @@ -48,11 +48,11 @@ with iotests.FilePath('base.img') as base_img_path, \ assert qemu_io_silent(base_img_path, '-c', 'write -P 1 3M 1M') == 0 assert qemu_img('create', '-f', iotests.imgfmt, '-b', base_img_path, '-F', iotests.imgfmt, mid_img_path) == 0 - assert qemu_io_silent(mid_img_path, '-c', 'write -P 3 2M 1M') == 0 - assert qemu_io_silent(mid_img_path, '-c', 'write -P 3 4M 1M') == 0 + assert qemu_io_silent(mid_img_path, '-c', 'write -P 3 2M 1M') == 0 + assert qemu_io_silent(mid_img_path, '-c', 'write -P 3 4M 1M') == 0 assert qemu_img('create', '-f', iotests.imgfmt, '-b', mid_img_path, '-F', iotests.imgfmt, top_img_path) == 0 - assert qemu_io_silent(top_img_path, '-c', 'write -P 2 1M 1M') == 0 + assert qemu_io_silent(top_img_path, '-c', 'write -P 2 1M 1M') == 0 # 0 1 2 3 4 # top 2 @@ -108,10 +108,10 @@ with iotests.FilePath('base.img') as base_img_path, \ assert qemu_img('rebase', '-u', '-b', '', '-f', iotests.imgfmt, top_img_path) == 0 - assert qemu_io_silent(top_img_path, '-c', 'read -P 0 0 1M') == 0 - assert qemu_io_silent(top_img_path, '-c', 'read -P 2 1M 1M') == 0 - assert qemu_io_silent(top_img_path, '-c', 'read -P 3 2M 1M') == 0 - assert qemu_io_silent(top_img_path, '-c', 'read -P 0 3M 1M') == 0 - assert qemu_io_silent(top_img_path, '-c', 'read -P 3 4M 1M') == 0 + assert qemu_io_silent(top_img_path, '-c', 'read -P 0 0 1M') == 0 + assert qemu_io_silent(top_img_path, '-c', 'read -P 2 1M 1M') == 0 + assert qemu_io_silent(top_img_path, '-c', 'read -P 3 2M 1M') == 0 + assert qemu_io_silent(top_img_path, '-c', 'read -P 0 3M 1M') == 0 + assert qemu_io_silent(top_img_path, '-c', 'read -P 3 4M 1M') == 0 log('Done') diff --git a/tests/qemu-iotests/check b/tests/qemu-iotests/check index da1bfb839e..43a4b694cc 100755 --- a/tests/qemu-iotests/check +++ b/tests/qemu-iotests/check @@ -37,13 +37,14 @@ def make_argparser() -> argparse.ArgumentParser: p.add_argument('-d', dest='debug', action='store_true', help='debug') p.add_argument('-p', dest='print', action='store_true', - help='redirects qemu\'s stdout and stderr to the test output') + help='redirects qemu\'s stdout and stderr to ' + 'the test output') p.add_argument('-gdb', action='store_true', - help="start gdbserver with $GDB_OPTIONS options \ - ('localhost:12345' if $GDB_OPTIONS is empty)") + help="start gdbserver with $GDB_OPTIONS options " + "('localhost:12345' if $GDB_OPTIONS is empty)") p.add_argument('-valgrind', action='store_true', - help='use valgrind, sets VALGRIND_QEMU environment ' - 'variable') + help='use valgrind, sets VALGRIND_QEMU environment ' + 'variable') p.add_argument('-misalign', action='store_true', help='misalign memory allocations') diff --git a/tests/qemu-iotests/iotests.py b/tests/qemu-iotests/iotests.py index 
ce06cf5630..83bfedb902 100644 --- a/tests/qemu-iotests/iotests.py +++ b/tests/qemu-iotests/iotests.py @@ -30,14 +30,12 @@ import struct import subprocess import sys import time -from typing import (Any, Callable, Dict, Iterable, +from typing import (Any, Callable, Dict, Iterable, Iterator, List, Optional, Sequence, TextIO, Tuple, Type, TypeVar) import unittest from contextlib import contextmanager -# pylint: disable=import-error, wrong-import-position -sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..', 'python')) from qemu.machine import qtest from qemu.qmp import QMPMessage @@ -109,8 +107,6 @@ if os.environ.get('VALGRIND_QEMU') == "y" and \ qemu_valgrind = ['valgrind', valgrind_logfile, '--error-exitcode=99'] -socket_scm_helper = os.environ.get('SOCKET_SCM_HELPER', 'socket_scm_helper') - luks_default_secret_object = 'secret,id=keysec0,data=' + \ os.environ.get('IMGKEYSECRET', '') luks_default_key_secret_opt = 'key-secret=keysec0' @@ -118,6 +114,24 @@ luks_default_key_secret_opt = 'key-secret=keysec0' sample_img_dir = os.environ['SAMPLE_IMG_DIR'] +@contextmanager +def change_log_level( + logger_name: str, level: int = logging.CRITICAL) -> Iterator[None]: + """ + Utility function for temporarily changing the log level of a logger. + + This can be used to silence errors that are expected or uninteresting. + """ + _logger = logging.getLogger(logger_name) + current_level = _logger.level + _logger.setLevel(level) + + try: + yield + finally: + _logger.setLevel(current_level) + + def unarchive_sample_image(sample, fname): sample_fname = os.path.join(sample_img_dir, sample + '.bz2') with bz2.open(sample_fname) as f_in, open(fname, 'wb') as f_out: @@ -600,7 +614,6 @@ class VM(qtest.QEMUQtestMachine): super().__init__(qemu_prog, qemu_opts, wrapper=wrapper, name=name, base_temp_dir=test_dir, - socket_scm_helper=socket_scm_helper, sock_dir=sock_dir, qmp_timer=timer) self._num_drives = 0 @@ -608,7 +621,7 @@ class VM(qtest.QEMUQtestMachine): super()._post_shutdown() if not qemu_valgrind or not self._popen: return - valgrind_filename = f"{test_dir}/{self._popen.pid}.valgrind" + valgrind_filename = f"{test_dir}/{self._popen.pid}.valgrind" if self.exitcode() == 99: with open(valgrind_filename, encoding='utf-8') as f: print(f.read()) @@ -1350,8 +1363,9 @@ class ReproducibleStreamWrapper: class ReproducibleTestRunner(unittest.TextTestRunner): def __init__(self, stream: Optional[TextIO] = None, - resultclass: Type[unittest.TestResult] = ReproducibleTestResult, - **kwargs: Any) -> None: + resultclass: Type[unittest.TestResult] = + ReproducibleTestResult, + **kwargs: Any) -> None: rstream = ReproducibleStreamWrapper(stream or sys.stdout) super().__init__(stream=rstream, # type: ignore descriptions=True, diff --git a/tests/qemu-iotests/linters.py b/tests/qemu-iotests/linters.py new file mode 100644 index 0000000000..65c4c4e827 --- /dev/null +++ b/tests/qemu-iotests/linters.py @@ -0,0 +1,105 @@ +# Copyright (C) 2020 Red Hat, Inc. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +import os +import re +import subprocess +import sys +from typing import List, Mapping, Optional + + +# TODO: Empty this list! +SKIP_FILES = ( + '030', '040', '041', '044', '045', '055', '056', '057', '065', '093', + '096', '118', '124', '132', '136', '139', '147', '148', '149', + '151', '152', '155', '163', '165', '194', '196', '202', + '203', '205', '206', '207', '208', '210', '211', '212', '213', '216', + '218', '219', '224', '228', '234', '235', '236', '237', '238', + '240', '242', '245', '246', '248', '255', '256', '257', '258', '260', + '262', '264', '266', '274', '277', '280', '281', '295', '296', '298', + '299', '302', '303', '304', '307', + 'nbd-fault-injector.py', 'qcow2.py', 'qcow2_format.py', 'qed.py' +) + + +def is_python_file(filename): + if not os.path.isfile(filename): + return False + + if filename.endswith('.py'): + return True + + with open(filename, encoding='utf-8') as f: + try: + first_line = f.readline() + return re.match('^#!.*python', first_line) is not None + except UnicodeDecodeError: # Ignore binary files + return False + + +def get_test_files() -> List[str]: + named_tests = [f'tests/{entry}' for entry in os.listdir('tests')] + check_tests = set(os.listdir('.') + named_tests) - set(SKIP_FILES) + return list(filter(is_python_file, check_tests)) + + +def run_linter( + tool: str, + args: List[str], + env: Optional[Mapping[str, str]] = None, + suppress_output: bool = False, +) -> None: + """ + Run a python-based linting tool. + + :param suppress_output: If True, suppress all stdout/stderr output. + :raise CalledProcessError: If the linter process exits with failure. + """ + subprocess.run( + ('python3', '-m', tool, *args), + env=env, + check=True, + stdout=subprocess.PIPE if suppress_output else None, + stderr=subprocess.STDOUT if suppress_output else None, + universal_newlines=True, + ) + + +def main() -> None: + """ + Used by the Python CI system as an entry point to run these linters. + """ + def show_usage() -> None: + print(f"Usage: {sys.argv[0]} < --mypy | --pylint >", file=sys.stderr) + sys.exit(1) + + if len(sys.argv) != 2: + show_usage() + + files = get_test_files() + + if sys.argv[1] == '--pylint': + run_linter('pylint', files) + elif sys.argv[1] == '--mypy': + # mypy bug #9852; disable incremental checking as a workaround. 
+ args = ['--no-incremental'] + files + run_linter('mypy', args) + else: + print(f"Unrecognized argument: '{sys.argv[1]}'", file=sys.stderr) + show_usage() + + +if __name__ == '__main__': + main() diff --git a/tests/qemu-iotests/meson.build b/tests/qemu-iotests/meson.build deleted file mode 100644 index 67aed1e492..0000000000 --- a/tests/qemu-iotests/meson.build +++ /dev/null @@ -1,5 +0,0 @@ -if 'CONFIG_LINUX' in config_host - socket_scm_helper = executable('socket_scm_helper', 'socket_scm_helper.c') -else - socket_scm_helper = [] -endif diff --git a/tests/qemu-iotests/mypy.ini b/tests/qemu-iotests/mypy.ini new file mode 100644 index 0000000000..4c0339f558 --- /dev/null +++ b/tests/qemu-iotests/mypy.ini @@ -0,0 +1,12 @@ +[mypy] +disallow_any_generics = True +disallow_incomplete_defs = True +disallow_subclassing_any = True +disallow_untyped_decorators = True +implicit_reexport = False +namespace_packages = True +no_implicit_optional = True +scripts_are_modules = True +warn_redundant_casts = True +warn_unused_configs = True +warn_unused_ignores = True diff --git a/tests/qemu-iotests/pylintrc b/tests/qemu-iotests/pylintrc index f2c0b522ac..32ab77b8bb 100644 --- a/tests/qemu-iotests/pylintrc +++ b/tests/qemu-iotests/pylintrc @@ -19,13 +19,33 @@ disable=invalid-name, too-many-public-methods, # pylint warns about Optional[] etc. as unsubscriptable in 3.9 unsubscriptable-object, + # pylint's static analysis causes false positivies for file_path(); + # If we really care to make it statically knowable, we'll use mypy. + unbalanced-tuple-unpacking, # Sometimes we need to disable a newly introduced pylint warning. # Doing so should not produce a warning in older versions of pylint. bad-option-value, # These are temporary, and should be removed: missing-docstring, too-many-return-statements, - too-many-statements + too-many-statements, + consider-using-f-string, + + +[REPORTS] + +# Activate the evaluation score. +score=no + + +[MISCELLANEOUS] + +# List of note tags to take in consideration, separated by a comma. +# TODO notes are fine, but FIXMEs or XXXs should probably just be +# fixed (in tests, at least). +notes=FIXME, + XXX, + [FORMAT] diff --git a/tests/qemu-iotests/socket_scm_helper.c b/tests/qemu-iotests/socket_scm_helper.c deleted file mode 100644 index eb76d31aa9..0000000000 --- a/tests/qemu-iotests/socket_scm_helper.c +++ /dev/null @@ -1,136 +0,0 @@ -/* - * SCM_RIGHTS with unix socket help program for test - * - * Copyright IBM, Inc. 2013 - * - * Authors: - * Wenchao Xia - * - * This work is licensed under the terms of the GNU LGPL, version 2 or later. - * See the COPYING.LIB file in the top-level directory. - */ - -#include "qemu/osdep.h" -#include -#include - -/* #define SOCKET_SCM_DEBUG */ - -/* - * @fd and @fd_to_send will not be checked for validation in this function, - * a blank will be sent as iov data to notify qemu. 
- */ -static int send_fd(int fd, int fd_to_send) -{ - struct msghdr msg; - struct iovec iov[1]; - int ret; - char control[CMSG_SPACE(sizeof(int))]; - struct cmsghdr *cmsg; - - memset(&msg, 0, sizeof(msg)); - memset(control, 0, sizeof(control)); - - /* Send a blank to notify qemu */ - iov[0].iov_base = (void *)" "; - iov[0].iov_len = 1; - - msg.msg_iov = iov; - msg.msg_iovlen = 1; - - msg.msg_control = control; - msg.msg_controllen = sizeof(control); - - cmsg = CMSG_FIRSTHDR(&msg); - - cmsg->cmsg_len = CMSG_LEN(sizeof(int)); - cmsg->cmsg_level = SOL_SOCKET; - cmsg->cmsg_type = SCM_RIGHTS; - memcpy(CMSG_DATA(cmsg), &fd_to_send, sizeof(int)); - - do { - ret = sendmsg(fd, &msg, 0); - } while (ret < 0 && errno == EINTR); - - if (ret < 0) { - fprintf(stderr, "Failed to send msg, reason: %s\n", strerror(errno)); - } - - return ret; -} - -/* Convert string to fd number. */ -static int get_fd_num(const char *fd_str, bool silent) -{ - int sock; - char *err; - - errno = 0; - sock = strtol(fd_str, &err, 10); - if (errno) { - if (!silent) { - fprintf(stderr, "Failed in strtol for socket fd, reason: %s\n", - strerror(errno)); - } - return -1; - } - if (!*fd_str || *err || sock < 0) { - if (!silent) { - fprintf(stderr, "bad numerical value for socket fd '%s'\n", fd_str); - } - return -1; - } - - return sock; -} - -/* - * To make things simple, the caller needs to specify: - * 1. socket fd. - * 2. path of the file to be sent. - */ -int main(int argc, char **argv, char **envp) -{ - int sock, fd, ret; - -#ifdef SOCKET_SCM_DEBUG - int i; - for (i = 0; i < argc; i++) { - fprintf(stderr, "Parameter %d: %s\n", i, argv[i]); - } -#endif - - if (argc != 3) { - fprintf(stderr, - "Usage: %s < socket-fd > < file-path >\n", - argv[0]); - return EXIT_FAILURE; - } - - - sock = get_fd_num(argv[1], false); - if (sock < 0) { - return EXIT_FAILURE; - } - - fd = get_fd_num(argv[2], true); - if (fd < 0) { - /* Now only open a file in readonly mode for test purpose. If more - precise control is needed, use python script in file operation, which - is supposed to fork and exec this program. */ - fd = open(argv[2], O_RDONLY); - if (fd < 0) { - fprintf(stderr, "Failed to open file '%s'\n", argv[2]); - return EXIT_FAILURE; - } - } - - ret = send_fd(sock, fd); - if (ret < 0) { - close(fd); - return EXIT_FAILURE; - } - - close(fd); - return EXIT_SUCCESS; -} diff --git a/tests/qemu-iotests/testenv.py b/tests/qemu-iotests/testenv.py index 70da0d60c8..c33454fa68 100644 --- a/tests/qemu-iotests/testenv.py +++ b/tests/qemu-iotests/testenv.py @@ -68,7 +68,7 @@ class TestEnv(ContextManager['TestEnv']): env_variables = ['PYTHONPATH', 'TEST_DIR', 'SOCK_DIR', 'SAMPLE_IMG_DIR', 'OUTPUT_DIR', 'PYTHON', 'QEMU_PROG', 'QEMU_IMG_PROG', 'QEMU_IO_PROG', 'QEMU_NBD_PROG', 'QSD_PROG', - 'SOCKET_SCM_HELPER', 'QEMU_OPTIONS', 'QEMU_IMG_OPTIONS', + 'QEMU_OPTIONS', 'QEMU_IMG_OPTIONS', 'QEMU_IO_OPTIONS', 'QEMU_IO_OPTIONS_NO_FMT', 'QEMU_NBD_OPTIONS', 'IMGOPTS', 'IMGFMT', 'IMGPROTO', 'AIOMODE', 'CACHEMODE', 'VALGRIND_QEMU', @@ -108,12 +108,15 @@ class TestEnv(ContextManager['TestEnv']): SAMPLE_IMG_DIR OUTPUT_DIR """ - self.pythonpath = os.getenv('PYTHONPATH') - if self.pythonpath: - self.pythonpath = self.source_iotests + os.pathsep + \ - self.pythonpath - else: - self.pythonpath = self.source_iotests + + # Path where qemu goodies live in this source tree. 
+ qemu_srctree_path = Path(__file__, '../../../python').resolve() + + self.pythonpath = os.pathsep.join(filter(None, ( + self.source_iotests, + str(qemu_srctree_path), + os.getenv('PYTHONPATH'), + ))) self.test_dir = os.getenv('TEST_DIR', os.path.join(os.getcwd(), 'scratch')) @@ -137,7 +140,6 @@ class TestEnv(ContextManager['TestEnv']): """Init binary path variables: PYTHON (for bash tests) QEMU_PROG, QEMU_IMG_PROG, QEMU_IO_PROG, QEMU_NBD_PROG, QSD_PROG - SOCKET_SCM_HELPER """ self.python = sys.executable @@ -171,10 +173,6 @@ class TestEnv(ContextManager['TestEnv']): if not isxfile(b): sys.exit('Not executable: ' + b) - helper_path = os.path.join(self.build_iotests, 'socket_scm_helper') - if isxfile(helper_path): - self.socket_scm_helper = helper_path # SOCKET_SCM_HELPER - def __init__(self, imgfmt: str, imgproto: str, aiomode: str, cachemode: Optional[str] = None, imgopts: Optional[str] = None, @@ -300,7 +298,6 @@ IMGPROTO -- {IMGPROTO} PLATFORM -- {platform} TEST_DIR -- {TEST_DIR} SOCK_DIR -- {SOCK_DIR} -SOCKET_SCM_HELPER -- {SOCKET_SCM_HELPER} GDB_OPTIONS -- {GDB_OPTIONS} VALGRIND_QEMU -- {VALGRIND_QEMU} PRINT_QEMU_OUTPUT -- {PRINT_QEMU} diff --git a/tests/qemu-iotests/testrunner.py b/tests/qemu-iotests/testrunner.py index 4a6ec421ed..0e29c2fddd 100644 --- a/tests/qemu-iotests/testrunner.py +++ b/tests/qemu-iotests/testrunner.py @@ -266,12 +266,13 @@ class TestRunner(ContextManager['TestRunner']): diff=file_diff(str(f_reference), str(f_bad))) if f_notrun.exists(): - return TestResult(status='not run', - description=f_notrun.read_text().strip()) + return TestResult( + status='not run', + description=f_notrun.read_text(encoding='utf-8').strip()) casenotrun = '' if f_casenotrun.exists(): - casenotrun = f_casenotrun.read_text() + casenotrun = f_casenotrun.read_text(encoding='utf-8') diff = file_diff(str(f_reference), str(f_bad)) if diff: @@ -340,6 +341,7 @@ class TestRunner(ContextManager['TestRunner']): elif res.status == 'not run': notrun.append(name) + sys.stdout.flush() if res.interrupted: break diff --git a/tests/qemu-iotests/tests/image-fleecing b/tests/qemu-iotests/tests/image-fleecing index f6318492c6..a58b5a1781 100755 --- a/tests/qemu-iotests/tests/image-fleecing +++ b/tests/qemu-iotests/tests/image-fleecing @@ -28,6 +28,7 @@ from iotests import log, qemu_img, qemu_io, qemu_io_silent iotests.script_initialize( supported_fmts=['qcow2', 'qcow', 'qed', 'vmdk', 'vhdx', 'raw'], supported_platforms=['linux'], + required_fmts=['copy-before-write'], ) patterns = [('0x5d', '0', '64k'), @@ -114,8 +115,8 @@ def do_test(use_cbw, base_img_path, fleece_img_path, nbd_sock_path, vm): nbd_uri = 'nbd+unix:///%s?socket=%s' % (tmp_node, nbd_sock_path) log(vm.qmp('nbd-server-start', - {'addr': { 'type': 'unix', - 'data': { 'path': nbd_sock_path } } })) + {'addr': {'type': 'unix', + 'data': {'path': nbd_sock_path}}})) log(vm.qmp('nbd-server-add', device=tmp_node)) diff --git a/tests/qemu-iotests/tests/migrate-bitmaps-test b/tests/qemu-iotests/tests/migrate-bitmaps-test index dc431c35b3..c23df3d75c 100755 --- a/tests/qemu-iotests/tests/migrate-bitmaps-test +++ b/tests/qemu-iotests/tests/migrate-bitmaps-test @@ -19,10 +19,11 @@ # along with this program. If not, see . 
# -import os import itertools import operator +import os import re + import iotests from iotests import qemu_img, qemu_img_create, Timeout @@ -224,25 +225,6 @@ def inject_test_case(klass, suffix, method, *args, **kwargs): setattr(klass, 'test_' + method + suffix, lambda self: mc(self)) -for cmb in list(itertools.product((True, False), repeat=5)): - name = ('_' if cmb[0] else '_not_') + 'persistent_' - name += ('_' if cmb[1] else '_not_') + 'migbitmap_' - name += '_online' if cmb[2] else '_offline' - name += '_shared' if cmb[3] else '_nonshared' - if cmb[4]: - name += '__pre_shutdown' - - inject_test_case(TestDirtyBitmapMigration, name, 'do_test_migration', - *list(cmb)) - -for cmb in list(itertools.product((True, False), repeat=2)): - name = ('_' if cmb[0] else '_not_') + 'persistent_' - name += ('_' if cmb[1] else '_not_') + 'migbitmap' - - inject_test_case(TestDirtyBitmapMigration, name, - 'do_test_migration_resume_source', *list(cmb)) - - class TestDirtyBitmapBackingMigration(iotests.QMPTestCase): def setUp(self): qemu_img_create('-f', iotests.imgfmt, base_a, size) @@ -304,6 +286,30 @@ class TestDirtyBitmapBackingMigration(iotests.QMPTestCase): self.assert_qmp(result, 'return', {}) +def main() -> None: + for cmb in list(itertools.product((True, False), repeat=5)): + name = ('_' if cmb[0] else '_not_') + 'persistent_' + name += ('_' if cmb[1] else '_not_') + 'migbitmap_' + name += '_online' if cmb[2] else '_offline' + name += '_shared' if cmb[3] else '_nonshared' + if cmb[4]: + name += '__pre_shutdown' + + inject_test_case(TestDirtyBitmapMigration, name, 'do_test_migration', + *list(cmb)) + + for cmb in list(itertools.product((True, False), repeat=2)): + name = ('_' if cmb[0] else '_not_') + 'persistent_' + name += ('_' if cmb[1] else '_not_') + 'migbitmap' + + inject_test_case(TestDirtyBitmapMigration, name, + 'do_test_migration_resume_source', *list(cmb)) + + iotests.main( + supported_fmts=['qcow2'], + supported_protocols=['file'] + ) + + if __name__ == '__main__': - iotests.main(supported_fmts=['qcow2'], - supported_protocols=['file']) + main() diff --git a/tests/qemu-iotests/tests/mirror-ready-cancel-error b/tests/qemu-iotests/tests/mirror-ready-cancel-error new file mode 100755 index 0000000000..f2dc88881f --- /dev/null +++ b/tests/qemu-iotests/tests/mirror-ready-cancel-error @@ -0,0 +1,143 @@ +#!/usr/bin/env python3 +# group: rw quick +# +# Test what happens when errors occur to a mirror job after it has +# been cancelled in the READY phase +# +# Copyright (C) 2021 Red Hat, Inc. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
+# + +import os +import iotests + + +image_size = 1 * 1024 * 1024 +source = os.path.join(iotests.test_dir, 'source.img') +target = os.path.join(iotests.test_dir, 'target.img') + + +class TestMirrorReadyCancelError(iotests.QMPTestCase): + def setUp(self) -> None: + assert iotests.qemu_img_create('-f', iotests.imgfmt, source, + str(image_size)) == 0 + assert iotests.qemu_img_create('-f', iotests.imgfmt, target, + str(image_size)) == 0 + + self.vm = iotests.VM() + self.vm.launch() + + def tearDown(self) -> None: + self.vm.shutdown() + os.remove(source) + os.remove(target) + + def add_blockdevs(self, once: bool) -> None: + res = self.vm.qmp('blockdev-add', + **{'node-name': 'source', + 'driver': iotests.imgfmt, + 'file': { + 'driver': 'file', + 'filename': source + }}) + self.assert_qmp(res, 'return', {}) + + # blkdebug notes: + # Enter state 2 on the first flush, which happens before the + # job enters the READY state. The second flush will happen + # when the job is about to complete, and we want that one to + # fail. + res = self.vm.qmp('blockdev-add', + **{'node-name': 'target', + 'driver': iotests.imgfmt, + 'file': { + 'driver': 'blkdebug', + 'image': { + 'driver': 'file', + 'filename': target + }, + 'set-state': [{ + 'event': 'flush_to_disk', + 'state': 1, + 'new_state': 2 + }], + 'inject-error': [{ + 'event': 'flush_to_disk', + 'once': once, + 'immediately': True, + 'state': 2 + }]}}) + self.assert_qmp(res, 'return', {}) + + def start_mirror(self) -> None: + res = self.vm.qmp('blockdev-mirror', + job_id='mirror', + device='source', + target='target', + filter_node_name='mirror-top', + sync='full', + on_target_error='stop') + self.assert_qmp(res, 'return', {}) + + def cancel_mirror_with_error(self) -> None: + self.vm.event_wait('BLOCK_JOB_READY') + + # Write something so will not leave the job immediately, but + # flush first (which will fail, thanks to blkdebug) + res = self.vm.qmp('human-monitor-command', + command_line='qemu-io mirror-top "write 0 64k"') + self.assert_qmp(res, 'return', '') + + # Drain status change events + while self.vm.event_wait('JOB_STATUS_CHANGE', timeout=0.0) is not None: + pass + + res = self.vm.qmp('block-job-cancel', device='mirror') + self.assert_qmp(res, 'return', {}) + + self.vm.event_wait('BLOCK_JOB_ERROR') + + def test_transient_error(self) -> None: + self.add_blockdevs(True) + self.start_mirror() + self.cancel_mirror_with_error() + + while True: + e = self.vm.event_wait('JOB_STATUS_CHANGE') + if e['data']['status'] == 'standby': + # Transient error, try again + self.vm.qmp('block-job-resume', device='mirror') + elif e['data']['status'] == 'null': + break + + def test_persistent_error(self) -> None: + self.add_blockdevs(False) + self.start_mirror() + self.cancel_mirror_with_error() + + while True: + e = self.vm.event_wait('JOB_STATUS_CHANGE') + if e['data']['status'] == 'standby': + # Persistent error, no point in continuing + self.vm.qmp('block-job-cancel', device='mirror', force=True) + elif e['data']['status'] == 'null': + break + + +if __name__ == '__main__': + # LUKS would require special key-secret handling in add_blockdevs() + iotests.main(supported_fmts=['generic'], + unsupported_fmts=['luks'], + supported_protocols=['file']) diff --git a/tests/qemu-iotests/tests/mirror-ready-cancel-error.out b/tests/qemu-iotests/tests/mirror-ready-cancel-error.out new file mode 100644 index 0000000000..fbc63e62f8 --- /dev/null +++ b/tests/qemu-iotests/tests/mirror-ready-cancel-error.out @@ -0,0 +1,5 @@ +.. 
+---------------------------------------------------------------------- +Ran 2 tests + +OK diff --git a/tests/qemu-iotests/tests/mirror-top-perms b/tests/qemu-iotests/tests/mirror-top-perms index 2fc8dd66e0..0a51a613f3 100755 --- a/tests/qemu-iotests/tests/mirror-top-perms +++ b/tests/qemu-iotests/tests/mirror-top-perms @@ -20,12 +20,13 @@ # import os -import iotests -from iotests import qemu_img -# Import qemu after iotests.py has amended sys.path -# pylint: disable=wrong-import-order -import qemu +from qemu.aqmp import ConnectError +from qemu.machine import machine +from qemu.qmp import QMPConnectError + +import iotests +from iotests import change_log_level, qemu_img image_size = 1 * 1024 * 1024 @@ -47,7 +48,7 @@ class TestMirrorTopPerms(iotests.QMPTestCase): def tearDown(self): try: self.vm.shutdown() - except qemu.machine.machine.AbnormalShutdown: + except machine.AbnormalShutdown: pass if self.vm_b is not None: @@ -99,10 +100,14 @@ class TestMirrorTopPerms(iotests.QMPTestCase): self.vm_b.add_blockdev(f'file,node-name=drive0,filename={source}') self.vm_b.add_device('virtio-blk,drive=drive0,share-rw=on') try: - self.vm_b.launch() - print('ERROR: VM B launched successfully, this should not have ' - 'happened') - except qemu.qmp.QMPConnectError: + # Silence AQMP errors temporarily. + # TODO: Remove this and just allow the errors to be logged when + # AQMP fully replaces QMP. + with change_log_level('qemu.aqmp'): + self.vm_b.launch() + print('ERROR: VM B launched successfully, ' + 'this should not have happened') + except (QMPConnectError, ConnectError): assert 'Is another process using the image' in self.vm_b.get_log() result = self.vm.qmp('block-job-cancel', diff --git a/tests/qemu-iotests/tests/qsd-jobs.out b/tests/qemu-iotests/tests/qsd-jobs.out index 189423354b..c1bc9b8356 100644 --- a/tests/qemu-iotests/tests/qsd-jobs.out +++ b/tests/qemu-iotests/tests/qsd-jobs.out @@ -8,7 +8,7 @@ QMP_VERSION {"return": {}} {"return": {}} {"return": {}} -{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_COMPLETED", "data": {"device": "job0", "len": 0, "offset": 0, "speed": 0, "type": "commit"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "BLOCK_JOB_CANCELLED", "data": {"device": "job0", "len": 0, "offset": 0, "speed": 0, "type": "commit"}} === Streaming can't get permission on base node === diff --git a/tests/qtest/acpi-utils.c b/tests/qtest/acpi-utils.c index d2a202efca..766c48e3a6 100644 --- a/tests/qtest/acpi-utils.c +++ b/tests/qtest/acpi-utils.c @@ -98,6 +98,20 @@ void acpi_fetch_table(QTestState *qts, uint8_t **aml, uint32_t *aml_len, ACPI_ASSERT_CMP(**aml, sig); } if (verify_checksum) { + if (acpi_calc_checksum(*aml, *aml_len)) { + gint fd, ret; + char *fname = NULL; + GError *error = NULL; + + fprintf(stderr, "Invalid '%.4s'(%d)\n", *aml, *aml_len); + fd = g_file_open_tmp("malformed-XXXXXX.dat", &fname, &error); + g_assert_no_error(error); + fprintf(stderr, "Dumping invalid table into '%s'\n", fname); + ret = qemu_write_full(fd, *aml, *aml_len); + g_assert(ret == *aml_len); + close(fd); + g_free(fname); + } g_assert(!acpi_calc_checksum(*aml, *aml_len)); } } diff --git a/tests/qtest/am53c974-test.c b/tests/qtest/am53c974-test.c index d996866cd4..9b1e4211bd 100644 --- a/tests/qtest/am53c974-test.c +++ b/tests/qtest/am53c974-test.c @@ -189,6 +189,40 @@ static void test_cancelled_request_ok(void) qtest_quit(s); } +static void test_inflight_cancel_ok(void) +{ + QTestState *s = qtest_init( + "-device am53c974,id=scsi " + "-device 
scsi-hd,drive=disk0 -drive " + "id=disk0,if=none,file=null-co://,format=raw -nodefaults"); + qtest_outl(s, 0xcf8, 0x80001000); + qtest_inw(s, 0xcfc); + qtest_outl(s, 0xcf8, 0x80001010); + qtest_outl(s, 0xcfc, 0xffffffff); + qtest_outl(s, 0xcf8, 0x80001010); + qtest_inl(s, 0xcfc); + qtest_outl(s, 0xcf8, 0x80001010); + qtest_outl(s, 0xcfc, 0xc001); + qtest_outl(s, 0xcf8, 0x80001004); + qtest_inw(s, 0xcfc); + qtest_outl(s, 0xcf8, 0x80001004); + qtest_outw(s, 0xcfc, 0x7); + qtest_outl(s, 0xcf8, 0x80001004); + qtest_inw(s, 0xcfc); + qtest_inb(s, 0xc000); + qtest_outb(s, 0xc008, 0x8); + qtest_outw(s, 0xc00b, 0x4100); + qtest_outb(s, 0xc009, 0x0); + qtest_outb(s, 0xc009, 0x0); + qtest_outw(s, 0xc00b, 0xc212); + qtest_outl(s, 0xc042, 0x2c2c5a88); + qtest_outw(s, 0xc00b, 0xc212); + qtest_outw(s, 0xc00b, 0x415a); + qtest_outl(s, 0xc03f, 0x3060303); + qtest_outl(s, 0xc00b, 0x5afa9054); + qtest_quit(s); +} + int main(int argc, char **argv) { const char *arch = qtest_get_arch(); @@ -212,6 +246,8 @@ int main(int argc, char **argv) test_fifo_underflow_on_write_ok); qtest_add_func("am53c974/test_cancelled_request_ok", test_cancelled_request_ok); + qtest_add_func("am53c974/test_inflight_cancel_ok", + test_inflight_cancel_ok); } return g_test_run(); diff --git a/tests/qtest/arm-cpu-features.c b/tests/qtest/arm-cpu-features.c index 90a87f0ea9..f76652143a 100644 --- a/tests/qtest/arm-cpu-features.c +++ b/tests/qtest/arm-cpu-features.c @@ -26,21 +26,6 @@ " 'arguments': { 'type': 'full', " #define QUERY_TAIL "}}" -static bool kvm_enabled(QTestState *qts) -{ - QDict *resp, *qdict; - bool enabled; - - resp = qtest_qmp(qts, "{ 'execute': 'query-kvm' }"); - g_assert(qdict_haskey(resp, "return")); - qdict = qdict_get_qdict(resp, "return"); - g_assert(qdict_haskey(qdict, "enabled")); - enabled = qdict_get_bool(qdict, "enabled"); - qobject_unref(resp); - - return enabled; -} - static QDict *do_query_no_props(QTestState *qts, const char *cpu_type) { return qtest_qmp(qts, QUERY_HEAD "'model': { 'name': %s }" @@ -506,14 +491,6 @@ static void test_query_cpu_model_expansion_kvm(const void *data) qts = qtest_init(MACHINE_KVM "-cpu max"); - /* - * These tests target the 'host' CPU type, so KVM must be enabled. - */ - if (!kvm_enabled(qts)) { - qtest_quit(qts); - return; - } - /* Enabling and disabling kvm-no-adjvtime should always work. */ assert_has_feature_disabled(qts, "host", "kvm-no-adjvtime"); assert_set_feature(qts, "host", "kvm-no-adjvtime", true); @@ -637,7 +614,11 @@ int main(int argc, char **argv) * order avoid attempting to run an AArch32 QEMU with KVM on * AArch64 hosts. That won't work and isn't easy to detect. */ - if (g_str_equal(qtest_get_arch(), "aarch64")) { + if (g_str_equal(qtest_get_arch(), "aarch64") && qtest_has_accel("kvm")) { + /* + * This tests target the 'host' CPU type, so register it only if + * KVM is available. 
+ */ qtest_add_data_func("/arm/kvm/query-cpu-model-expansion", NULL, test_query_cpu_model_expansion_kvm); } diff --git a/tests/qtest/bios-tables-test.c b/tests/qtest/bios-tables-test.c index 4f11d03055..258874167e 100644 --- a/tests/qtest/bios-tables-test.c +++ b/tests/qtest/bios-tables-test.c @@ -271,19 +271,28 @@ static void dump_aml_files(test_data *data, bool rebuild) } } +static bool create_tmp_asl(AcpiSdtTable *sdt) +{ + GError *error = NULL; + gint fd; + + fd = g_file_open_tmp("asl-XXXXXX.dsl", &sdt->asl_file, &error); + g_assert_no_error(error); + close(fd); + + return false; +} + static bool load_asl(GArray *sdts, AcpiSdtTable *sdt) { AcpiSdtTable *temp; GError *error = NULL; GString *command_line = g_string_new(iasl); - gint fd; gchar *out, *out_err; gboolean ret; int i; - fd = g_file_open_tmp("asl-XXXXXX.dsl", &sdt->asl_file, &error); - g_assert_no_error(error); - close(fd); + create_tmp_asl(sdt); /* build command line */ g_string_append_printf(command_line, " -p %s ", sdt->asl_file); @@ -463,11 +472,20 @@ static void test_acpi_asl(test_data *data) err = load_asl(data->tables, sdt); asl = normalize_asl(sdt->asl); - exp_err = load_asl(exp_data.tables, exp_sdt); - exp_asl = normalize_asl(exp_sdt->asl); + /* + * If expected file is empty - it's likely that it was a stub just + * created for step 1 above: we do want to decompile the actual one. + */ + if (exp_sdt->aml_len) { + exp_err = load_asl(exp_data.tables, exp_sdt); + exp_asl = normalize_asl(exp_sdt->asl); + } else { + exp_err = create_tmp_asl(exp_sdt); + exp_asl = g_string_new(""); + } /* TODO: check for warnings */ - g_assert(!err || exp_err); + g_assert(!err || exp_err || !exp_sdt->aml_len); if (g_strcmp0(asl->str, exp_asl->str)) { sdt->tmp_files_retain = true; @@ -722,13 +740,6 @@ static void test_acpi_one(const char *params, test_data *data) char *args; bool use_uefi = data->uefi_fl1 && data->uefi_fl2; -#ifndef CONFIG_TCG - if (data->tcg_only) { - g_test_skip("TCG disabled, skipping ACPI tcg_only test"); - return; - } -#endif /* CONFIG_TCG */ - args = test_acpi_create_args(data, params, use_uefi); data->qts = qtest_init(args); test_acpi_load_tables(data, use_uefi); @@ -859,6 +870,23 @@ static void test_acpi_q35_tcg_bridge(void) free_test_data(&data); } +static void test_acpi_q35_multif_bridge(void) +{ + test_data data = { + .machine = MACHINE_Q35, + .variant = ".multi-bridge", + }; + test_acpi_one("-device pcie-root-port,id=pcie-root-port-0," + "multifunction=on," + "port=0x0,chassis=1,addr=0x2,bus=pcie.0 " + "-device pcie-root-port,id=pcie-root-port-1," + "port=0x1,chassis=2,addr=0x3.0x1,bus=pcie.0 " + "-device virtio-balloon,id=balloon0," + "bus=pcie.0,addr=0x4.0x2", + &data); + free_test_data(&data); +} + static void test_acpi_q35_tcg_mmio64(void) { test_data data = { @@ -1033,6 +1061,19 @@ static void test_acpi_q35_tcg_numamem(void) free_test_data(&data); } +static void test_acpi_q35_kvm_xapic(void) +{ + test_data data; + + memset(&data, 0, sizeof(data)); + data.machine = MACHINE_Q35; + data.variant = ".xapic"; + test_acpi_one(" -object memory-backend-ram,id=ram0,size=128M" + " -numa node -numa node,memdev=ram0" + " -machine kernel-irqchip=on -smp 1,maxcpus=288", &data); + free_test_data(&data); +} + static void test_acpi_q35_tcg_nosmm(void) { test_data data; @@ -1077,6 +1118,30 @@ static void test_acpi_q35_tcg_nohpet(void) free_test_data(&data); } +static void test_acpi_q35_kvm_dmar(void) +{ + test_data data; + + memset(&data, 0, sizeof(data)); + data.machine = MACHINE_Q35; + data.variant = ".dmar"; + 
test_acpi_one("-machine kernel-irqchip=split -accel kvm" + " -device intel-iommu,intremap=on,device-iotlb=on", &data); + free_test_data(&data); +} + +static void test_acpi_q35_tcg_ivrs(void) +{ + test_data data; + + memset(&data, 0, sizeof(data)); + data.machine = MACHINE_Q35; + data.variant = ".ivrs"; + data.tcg_only = true, + test_acpi_one(" -device amd-iommu", &data); + free_test_data(&data); +} + static void test_acpi_piix4_tcg_numamem(void) { test_data data; @@ -1393,9 +1458,6 @@ static void test_acpi_virt_tcg(void) .scan_len = 128ULL * 1024 * 1024, }; - test_acpi_one("-cpu cortex-a57", &data); - free_test_data(&data); - data.smbios_cpu_max_speed = 2900; data.smbios_cpu_curr_speed = 2700; test_acpi_one("-cpu cortex-a57 " @@ -1509,6 +1571,8 @@ static void test_acpi_oem_fields_virt(void) int main(int argc, char *argv[]) { const char *arch = qtest_get_arch(); + const bool has_kvm = qtest_has_accel("kvm"); + const bool has_tcg = qtest_has_accel("tcg"); int ret; g_test_init(&argc, &argv, NULL); @@ -1534,6 +1598,7 @@ int main(int argc, char *argv[]) test_acpi_piix4_no_acpi_pci_hotplug); qtest_add_func("acpi/q35", test_acpi_q35_tcg); qtest_add_func("acpi/q35/bridge", test_acpi_q35_tcg_bridge); + qtest_add_func("acpi/q35/multif-bridge", test_acpi_q35_multif_bridge); qtest_add_func("acpi/q35/mmio64", test_acpi_q35_tcg_mmio64); qtest_add_func("acpi/piix4/ipmi", test_acpi_piix4_tcg_ipmi); qtest_add_func("acpi/q35/ipmi", test_acpi_q35_tcg_ipmi); @@ -1564,15 +1629,24 @@ int main(int argc, char *argv[]) qtest_add_func("acpi/microvm/rtc", test_acpi_microvm_rtc_tcg); qtest_add_func("acpi/microvm/ioapic2", test_acpi_microvm_ioapic2_tcg); qtest_add_func("acpi/microvm/oem-fields", test_acpi_oem_fields_microvm); - if (strcmp(arch, "x86_64") == 0) { - qtest_add_func("acpi/microvm/pcie", test_acpi_microvm_pcie_tcg); + if (has_tcg) { + qtest_add_func("acpi/q35/ivrs", test_acpi_q35_tcg_ivrs); + if (strcmp(arch, "x86_64") == 0) { + qtest_add_func("acpi/microvm/pcie", test_acpi_microvm_pcie_tcg); + } + } + if (has_kvm) { + qtest_add_func("acpi/q35/kvm/xapic", test_acpi_q35_kvm_xapic); + qtest_add_func("acpi/q35/kvm/dmar", test_acpi_q35_kvm_dmar); } } else if (strcmp(arch, "aarch64") == 0) { - qtest_add_func("acpi/virt", test_acpi_virt_tcg); - qtest_add_func("acpi/virt/numamem", test_acpi_virt_tcg_numamem); - qtest_add_func("acpi/virt/memhp", test_acpi_virt_tcg_memhp); - qtest_add_func("acpi/virt/pxb", test_acpi_virt_tcg_pxb); - qtest_add_func("acpi/virt/oem-fields", test_acpi_oem_fields_virt); + if (has_tcg) { + qtest_add_func("acpi/virt", test_acpi_virt_tcg); + qtest_add_func("acpi/virt/numamem", test_acpi_virt_tcg_numamem); + qtest_add_func("acpi/virt/memhp", test_acpi_virt_tcg_memhp); + qtest_add_func("acpi/virt/pxb", test_acpi_virt_tcg_pxb); + qtest_add_func("acpi/virt/oem-fields", test_acpi_oem_fields_virt); + } } ret = g_test_run(); boot_sector_cleanup(disk); diff --git a/tests/qtest/fuzz/meson.build b/tests/qtest/fuzz/meson.build index 8af6848cd5..189901d4a2 100644 --- a/tests/qtest/fuzz/meson.build +++ b/tests/qtest/fuzz/meson.build @@ -1,3 +1,7 @@ +if not get_option('fuzzing') + subdir_done() +endif + specific_fuzz_ss.add(files('fuzz.c', 'fork_fuzz.c', 'qos_fuzz.c', 'qtest_wrappers.c'), qos) @@ -9,7 +13,7 @@ specific_fuzz_ss.add(when: 'CONFIG_VIRTIO_BLK', if_true: files('virtio_blk_fuzz. 
specific_fuzz_ss.add(files('generic_fuzz.c')) fork_fuzz = declare_dependency( - link_args: config_host['FUZZ_EXE_LDFLAGS'].split() + + link_args: fuzz_exe_ldflags + ['-Wl,-wrap,qtest_inb', '-Wl,-wrap,qtest_inw', '-Wl,-wrap,qtest_inl', diff --git a/tests/qtest/libqos/libqtest.h b/tests/qtest/libqos/libqtest.h index a68dcd79d4..59e9271195 100644 --- a/tests/qtest/libqos/libqtest.h +++ b/tests/qtest/libqos/libqtest.h @@ -588,6 +588,14 @@ bool qtest_big_endian(QTestState *s); */ const char *qtest_get_arch(void); +/** + * qtest_has_accel: + * @accel_name: Accelerator name to check for. + * + * Returns: true if the accelerator is built in. + */ +bool qtest_has_accel(const char *accel_name); + /** * qtest_add_func: * @str: Test case path. diff --git a/tests/qtest/libqos/meson.build b/tests/qtest/libqos/meson.build index 1f5c8f1053..4af1f04787 100644 --- a/tests/qtest/libqos/meson.build +++ b/tests/qtest/libqos/meson.build @@ -5,6 +5,7 @@ libqos_srcs = files('../libqtest.c', 'fw_cfg.c', 'malloc.c', 'libqos.c', + 'sdhci-cmd.c', # spapr 'malloc-spapr.c', diff --git a/tests/qtest/libqos/sdhci-cmd.c b/tests/qtest/libqos/sdhci-cmd.c new file mode 100644 index 0000000000..2d9e518341 --- /dev/null +++ b/tests/qtest/libqos/sdhci-cmd.c @@ -0,0 +1,116 @@ +/* + * MMC Host Controller Commands + * + * Copyright (c) 2021 Google LLC + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ */ + +#include "qemu/osdep.h" +#include "sdhci-cmd.h" +#include "libqtest.h" + +static ssize_t read_fifo(QTestState *qts, uint64_t reg, char *msg, size_t count) +{ + uint32_t mask = 0xff; + size_t index = 0; + uint32_t msg_frag; + int size; + while (index < count) { + size = count - index; + if (size > 4) { + size = 4; + } + msg_frag = qtest_readl(qts, reg); + while (size > 0) { + msg[index] = msg_frag & mask; + if (msg[index++] == 0) { + return index; + } + msg_frag >>= 8; + --size; + } + } + return index; +} + +static void write_fifo(QTestState *qts, uint64_t reg, const char *msg, + size_t count) +{ + size_t index = 0; + uint32_t msg_frag; + int size; + int frag_i; + while (index < count) { + size = count - index; + if (size > 4) { + size = 4; + } + msg_frag = 0; + frag_i = 0; + while (frag_i < size) { + msg_frag |= ((uint32_t)msg[index++]) << (frag_i * 8); + ++frag_i; + } + qtest_writel(qts, reg, msg_frag); + } +} + +static void fill_block(QTestState *qts, uint64_t reg, int count) +{ + while (--count >= 0) { + qtest_writel(qts, reg, 0); + } +} + +void sdhci_cmd_regs(QTestState *qts, uint64_t base_addr, uint16_t blksize, + uint16_t blkcnt, uint32_t argument, uint16_t trnmod, + uint16_t cmdreg) +{ + qtest_writew(qts, base_addr + SDHC_BLKSIZE, blksize); + qtest_writew(qts, base_addr + SDHC_BLKCNT, blkcnt); + qtest_writel(qts, base_addr + SDHC_ARGUMENT, argument); + qtest_writew(qts, base_addr + SDHC_TRNMOD, trnmod); + qtest_writew(qts, base_addr + SDHC_CMDREG, cmdreg); +} + +ssize_t sdhci_read_cmd(QTestState *qts, uint64_t base_addr, char *msg, + size_t count) +{ + sdhci_cmd_regs(qts, base_addr, count, 1, 0, + SDHC_TRNS_MULTI | SDHC_TRNS_READ | SDHC_TRNS_BLK_CNT_EN, + SDHC_READ_MULTIPLE_BLOCK | SDHC_CMD_DATA_PRESENT); + + /* read sd fifo_buffer */ + ssize_t bytes_read = read_fifo(qts, base_addr + SDHC_BDATA, msg, count); + + sdhci_cmd_regs(qts, base_addr, 0, 0, 0, + SDHC_TRNS_MULTI | SDHC_TRNS_READ | SDHC_TRNS_BLK_CNT_EN, + SDHC_STOP_TRANSMISSION); + + return bytes_read; +} + +void sdhci_write_cmd(QTestState *qts, uint64_t base_addr, const char *msg, + size_t count, size_t blksize) +{ + sdhci_cmd_regs(qts, base_addr, blksize, 1, 0, + SDHC_TRNS_MULTI | SDHC_TRNS_WRITE | SDHC_TRNS_BLK_CNT_EN, + SDHC_WRITE_MULTIPLE_BLOCK | SDHC_CMD_DATA_PRESENT); + + /* write to sd fifo_buffer */ + write_fifo(qts, base_addr + SDHC_BDATA, msg, count); + fill_block(qts, base_addr + SDHC_BDATA, (blksize - count) / 4); + + sdhci_cmd_regs(qts, base_addr, 0, 0, 0, + SDHC_TRNS_MULTI | SDHC_TRNS_WRITE | SDHC_TRNS_BLK_CNT_EN, + SDHC_STOP_TRANSMISSION); +} diff --git a/tests/qtest/libqos/sdhci-cmd.h b/tests/qtest/libqos/sdhci-cmd.h new file mode 100644 index 0000000000..64763c5a2a --- /dev/null +++ b/tests/qtest/libqos/sdhci-cmd.h @@ -0,0 +1,70 @@ +/* + * MMC Host Controller Commands + * + * Copyright (c) 2021 Google LLC + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ */ + +#include "libqtest.h" + +/* more details at hw/sd/sdhci-internal.h */ +#define SDHC_BLKSIZE 0x04 +#define SDHC_BLKCNT 0x06 +#define SDHC_ARGUMENT 0x08 +#define SDHC_TRNMOD 0x0C +#define SDHC_CMDREG 0x0E +#define SDHC_BDATA 0x20 +#define SDHC_PRNSTS 0x24 +#define SDHC_BLKGAP 0x2A +#define SDHC_CLKCON 0x2C +#define SDHC_SWRST 0x2F +#define SDHC_CAPAB 0x40 +#define SDHC_MAXCURR 0x48 +#define SDHC_HCVER 0xFE + +/* TRNSMOD Reg */ +#define SDHC_TRNS_BLK_CNT_EN 0x0002 +#define SDHC_TRNS_READ 0x0010 +#define SDHC_TRNS_WRITE 0x0000 +#define SDHC_TRNS_MULTI 0x0020 + +/* CMD Reg */ +#define SDHC_CMD_DATA_PRESENT (1 << 5) +#define SDHC_ALL_SEND_CID (2 << 8) +#define SDHC_SEND_RELATIVE_ADDR (3 << 8) +#define SDHC_SELECT_DESELECT_CARD (7 << 8) +#define SDHC_SEND_CSD (9 << 8) +#define SDHC_STOP_TRANSMISSION (12 << 8) +#define SDHC_READ_MULTIPLE_BLOCK (18 << 8) +#define SDHC_WRITE_MULTIPLE_BLOCK (25 << 8) +#define SDHC_APP_CMD (55 << 8) + +/* SWRST Reg */ +#define SDHC_RESET_ALL 0x01 + +/* CLKCTRL Reg */ +#define SDHC_CLOCK_INT_EN 0x0001 +#define SDHC_CLOCK_INT_STABLE 0x0002 +#define SDHC_CLOCK_SDCLK_EN (1 << 2) + +/* Set registers needed to send commands to SD */ +void sdhci_cmd_regs(QTestState *qts, uint64_t base_addr, uint16_t blksize, + uint16_t blkcnt, uint32_t argument, uint16_t trnmod, + uint16_t cmdreg); + +/* Read at most 1 block of SD using non-DMA */ +ssize_t sdhci_read_cmd(QTestState *qts, uint64_t base_addr, char *msg, + size_t count); + +/* Write at most 1 block of SD using non-DMA */ +void sdhci_write_cmd(QTestState *qts, uint64_t base_addr, const char *msg, + size_t count, size_t blksize); diff --git a/tests/qtest/libqtest.c b/tests/qtest/libqtest.c index 73f6b977a6..25aeea385b 100644 --- a/tests/qtest/libqtest.c +++ b/tests/qtest/libqtest.c @@ -922,6 +922,33 @@ const char *qtest_get_arch(void) return end + 1; } +bool qtest_has_accel(const char *accel_name) +{ + if (g_str_equal(accel_name, "tcg")) { +#if defined(CONFIG_TCG) + return true; +#else + return false; +#endif + } else if (g_str_equal(accel_name, "kvm")) { + int i; + const char *arch = qtest_get_arch(); + const char *targets[] = { CONFIG_KVM_TARGETS }; + + for (i = 0; i < ARRAY_SIZE(targets); i++) { + if (!strncmp(targets[i], arch, strlen(arch))) { + if (!access("/dev/kvm", R_OK | W_OK)) { + return true; + } + } + } + } else { + /* not implemented */ + g_assert_not_reached(); + } + return false; +} + bool qtest_get_irq(QTestState *s, int num) { /* dummy operation in order to make sure irq is up to date */ diff --git a/tests/qtest/meson.build b/tests/qtest/meson.build index 757bb8499a..c9d8458062 100644 --- a/tests/qtest/meson.build +++ b/tests/qtest/meson.build @@ -68,12 +68,12 @@ qtests_i386 = \ (config_all_devices.has_key('CONFIG_RTL8139_PCI') ? ['rtl8139-test'] : []) + \ (config_all_devices.has_key('CONFIG_E1000E_PCI_EXPRESS') ? ['fuzz-e1000e-test'] : []) + \ (config_all_devices.has_key('CONFIG_ESP_PCI') ? ['am53c974-test'] : []) + \ + (unpack_edk2_blobs ? ['bios-tables-test'] : []) + \ qtests_pci + \ ['fdc-test', 'ide-test', 'hd-geo-test', 'boot-order-test', - 'bios-tables-test', 'rtc-test', 'i440fx-test', 'fw_cfg-test', @@ -180,7 +180,7 @@ qtests_arm = \ # TODO: once aarch64 TCG is fixed on ARM 32 bit host, make bios-tables-test unconditional qtests_aarch64 = \ - (cpu != 'arm' ? ['bios-tables-test'] : []) + \ + (cpu != 'arm' and unpack_edk2_blobs ? ['bios-tables-test'] : []) + \ (config_all_devices.has_key('CONFIG_TPM_TIS_SYSBUS') ? 
['tpm-tis-device-test'] : []) + \ (config_all_devices.has_key('CONFIG_TPM_TIS_SYSBUS') ? ['tpm-tis-device-swtpm-test'] : []) + \ ['arm-cpu-features', @@ -269,13 +269,13 @@ foreach dir : target_dirs qtest_emulator = emulators['qemu-system-' + target_base] target_qtests = get_variable('qtests_' + target_base, []) + qtests_generic - test_deps = [] + test_deps = roms qtest_env = environment() if have_tools qtest_env.set('QTEST_QEMU_IMG', './qemu-img') test_deps += [qemu_img] endif - qtest_env.set('G_TEST_DBUS_DAEMON', meson.source_root() / 'tests/dbus-vmstate-daemon.sh') + qtest_env.set('G_TEST_DBUS_DAEMON', meson.project_source_root() / 'tests/dbus-vmstate-daemon.sh') qtest_env.set('QTEST_QEMU_BINARY', './qemu-system-' + target_base) if have_tools and have_vhost_user_blk_server qtest_env.set('QTEST_QEMU_STORAGE_DAEMON_BINARY', './storage-daemon/qemu-storage-daemon') diff --git a/tests/qtest/migration-test.c b/tests/qtest/migration-test.c index cc5e83d98a..7b42f6fd90 100644 --- a/tests/qtest/migration-test.c +++ b/tests/qtest/migration-test.c @@ -1420,6 +1420,7 @@ static bool kvm_dirty_ring_supported(void) int main(int argc, char **argv) { char template[] = "/tmp/migration-test-XXXXXX"; + const bool has_kvm = qtest_has_accel("kvm"); int ret; g_test_init(&argc, &argv, NULL); @@ -1434,8 +1435,7 @@ int main(int argc, char **argv) * some reason) */ if (g_str_equal(qtest_get_arch(), "ppc64") && - (access("/sys/module/kvm_hv", F_OK) || - access("/dev/kvm", R_OK | W_OK))) { + (!has_kvm || access("/sys/module/kvm_hv", F_OK))) { g_test_message("Skipping test: kvm_hv not available"); return g_test_run(); } @@ -1444,16 +1444,9 @@ int main(int argc, char **argv) * Similar to ppc64, s390x seems to be touchy with TCG, so disable it * there until the problems are resolved */ - if (g_str_equal(qtest_get_arch(), "s390x")) { -#if defined(HOST_S390X) - if (access("/dev/kvm", R_OK | W_OK)) { - g_test_message("Skipping test: kvm not available"); - return g_test_run(); - } -#else - g_test_message("Skipping test: Need s390x host to work properly"); + if (g_str_equal(qtest_get_arch(), "s390x") && !has_kvm) { + g_test_message("Skipping test: s390x host with KVM is required"); return g_test_run(); -#endif } tmpfs = mkdtemp(template); diff --git a/tests/qtest/numa-test.c b/tests/qtest/numa-test.c index c677cd63c4..90bf68a5b3 100644 --- a/tests/qtest/numa-test.c +++ b/tests/qtest/numa-test.c @@ -42,7 +42,8 @@ static void test_def_cpu_split(const void *data) g_autofree char *s = NULL; g_autofree char *cli = NULL; - cli = make_cli(data, "-machine smp.cpus=8 -numa node,memdev=ram -numa node"); + cli = make_cli(data, "-machine smp.cpus=8,smp.sockets=8 " + "-numa node,memdev=ram -numa node"); qts = qtest_init(cli); s = qtest_hmp(qts, "info numa"); @@ -265,7 +266,8 @@ static void pc_dynamic_cpu_cfg(const void *data) QTestState *qs; g_autofree char *cli = NULL; - cli = make_cli(data, "-nodefaults --preconfig -machine smp.cpus=2"); + cli = make_cli(data, "-nodefaults --preconfig " + "-machine smp.cpus=2,smp.sockets=2"); qs = qtest_init(cli); /* create 2 numa nodes */ diff --git a/tests/qtest/qmp-cmd-test.c b/tests/qtest/qmp-cmd-test.c index c98b78d033..7f103ea3fd 100644 --- a/tests/qtest/qmp-cmd-test.c +++ b/tests/qtest/qmp-cmd-test.c @@ -46,6 +46,14 @@ static int query_error_class(const char *cmd) { "query-balloon", ERROR_CLASS_DEVICE_NOT_ACTIVE }, { "query-hotpluggable-cpus", ERROR_CLASS_GENERIC_ERROR }, { "query-vm-generation-id", ERROR_CLASS_GENERIC_ERROR }, +#ifndef CONFIG_PROFILER + { "x-query-profile", 
ERROR_CLASS_GENERIC_ERROR }, +#endif + /* Only valid with a USB bus added */ + { "x-query-usb", ERROR_CLASS_GENERIC_ERROR }, + /* Only valid with accel=tcg */ + { "x-query-jit", ERROR_CLASS_GENERIC_ERROR }, + { "x-query-opcount", ERROR_CLASS_GENERIC_ERROR }, { NULL, -1 } }; int i; @@ -100,6 +108,8 @@ static bool query_is_ignored(const char *cmd) /* Success depends on Host or Hypervisor SEV support */ "query-sev", "query-sev-capabilities", + "query-sgx", + "query-sgx-capabilities", NULL }; int i; diff --git a/tests/qtest/vhost-user-blk-test.c b/tests/qtest/vhost-user-blk-test.c index 6f108a1b62..62e670f39b 100644 --- a/tests/qtest/vhost-user-blk-test.c +++ b/tests/qtest/vhost-user-blk-test.c @@ -906,9 +906,9 @@ static void start_vhost_user_blk(GString *cmd_line, int vus_instances, img_path = drive_create(); g_string_append_printf(storage_daemon_command, "--blockdev driver=file,node-name=disk%d,filename=%s " - "--export type=vhost-user-blk,id=disk%d,addr.type=unix,addr.path=%s," + "--export type=vhost-user-blk,id=disk%d,addr.type=fd,addr.str=%d," "node-name=disk%i,writable=on,num-queues=%d ", - i, img_path, i, sock_path, i, num_queues); + i, img_path, i, fd, i, num_queues); g_string_append_printf(cmd_line, "-chardev socket,id=char%d,path=%s ", i + 1, sock_path); diff --git a/tests/qtest/virtio-net-test.c b/tests/qtest/virtio-net-test.c index a08e2ffe12..8bf74e516c 100644 --- a/tests/qtest/virtio-net-test.c +++ b/tests/qtest/virtio-net-test.c @@ -319,7 +319,7 @@ static void register_virtio_net_test(void) .before = virtio_net_test_setup, }; - qos_add_test("hotplug", "virtio-pci", hotplug, &opts); + qos_add_test("hotplug", "virtio-net-pci", hotplug, &opts); #ifndef _WIN32 qos_add_test("basic", "virtio-net", send_recv_test, &opts); qos_add_test("rx_stop_cont", "virtio-net", stop_cont_test, &opts); diff --git a/tests/tcg/configure.sh b/tests/tcg/configure.sh index 1f985ccfc0..9b76f58258 100755 --- a/tests/tcg/configure.sh +++ b/tests/tcg/configure.sh @@ -46,7 +46,7 @@ fi : ${cross_cc_aarch64="aarch64-linux-gnu-gcc"} : ${cross_cc_aarch64_be="$cross_cc_aarch64"} : ${cross_cc_cflags_aarch64_be="-mbig-endian"} -: $(cross_cc_alpha="alpha-linux-gnu-gcc") +: ${cross_cc_alpha="alpha-linux-gnu-gcc"} : ${cross_cc_arm="arm-linux-gnueabihf-gcc"} : ${cross_cc_cflags_armeb="-mbig-endian"} : ${cross_cc_hexagon="hexagon-unknown-linux-musl-clang"} @@ -55,17 +55,19 @@ fi : ${cross_cc_i386="i686-linux-gnu-gcc"} : ${cross_cc_cflags_i386="-m32"} : ${cross_cc_m68k="m68k-linux-gnu-gcc"} -: $(cross_cc_mips64el="mips64el-linux-gnuabi64-gcc") -: $(cross_cc_mips64="mips64-linux-gnuabi64-gcc") -: $(cross_cc_mipsel="mipsel-linux-gnu-gcc") -: $(cross_cc_mips="mips-linux-gnu-gcc") +: ${cross_cc_microblaze="microblaze-linux-musl-gcc"} +: ${cross_cc_mips64el="mips64el-linux-gnuabi64-gcc"} +: ${cross_cc_mips64="mips64-linux-gnuabi64-gcc"} +: ${cross_cc_mipsel="mipsel-linux-gnu-gcc"} +: ${cross_cc_mips="mips-linux-gnu-gcc"} +: ${cross_cc_nios2="nios2-linux-gnu-gcc"} : ${cross_cc_ppc="powerpc-linux-gnu-gcc"} : ${cross_cc_cflags_ppc="-m32"} : ${cross_cc_ppc64="powerpc64-linux-gnu-gcc"} : ${cross_cc_ppc64le="powerpc64le-linux-gnu-gcc"} -: $(cross_cc_riscv64="riscv64-linux-gnu-gcc") +: ${cross_cc_riscv64="riscv64-linux-gnu-gcc"} : ${cross_cc_s390x="s390x-linux-gnu-gcc"} -: $(cross_cc_sh4="sh4-linux-gnu-gcc") +: ${cross_cc_sh4="sh4-linux-gnu-gcc"} : ${cross_cc_cflags_sparc="-m32 -mv8plus -mcpu=ultrasparc"} : ${cross_cc_sparc64="sparc64-linux-gnu-gcc"} : ${cross_cc_cflags_sparc64="-m64 -mcpu=ultrasparc"} @@ -133,6 +135,11 @@ for target 
in $target_list; do container_image=debian-m68k-cross container_cross_cc=m68k-linux-gnu-gcc ;; + microblaze-*) + container_hosts=x86_64 + container_image=debian-microblaze-cross + container_cross_cc=microblaze-linux-musl-gcc + ;; mips64el-*) container_hosts=x86_64 container_image=debian-mips64el-cross @@ -153,6 +160,11 @@ for target in $target_list; do container_image=debian-mips-cross container_cross_cc=mips-linux-gnu-gcc ;; + nios2-*) + container_hosts=x86_64 + container_image=debian-nios2-cross + container_cross_cc=nios2-linux-gnu-gcc + ;; ppc-*|ppc64abi32-*) container_hosts=x86_64 container_image=debian-powerpc-test-cross diff --git a/tests/tcg/hexagon/Makefile.target b/tests/tcg/hexagon/Makefile.target index 050cd61c1a..8b07a28166 100644 --- a/tests/tcg/hexagon/Makefile.target +++ b/tests/tcg/hexagon/Makefile.target @@ -28,6 +28,7 @@ first: $(HEX_SRC)/first.S $(CC) -static -mv67 -nostdlib $^ -o $@ HEX_TESTS = first +HEX_TESTS += hex_sigsegv HEX_TESTS += misc HEX_TESTS += preg_alias HEX_TESTS += dual_stores @@ -39,5 +40,6 @@ HEX_TESTS += load_unpack HEX_TESTS += load_align HEX_TESTS += atomics HEX_TESTS += fpstuff +HEX_TESTS += overflow TESTS += $(HEX_TESTS) diff --git a/tests/tcg/hexagon/hex_sigsegv.c b/tests/tcg/hexagon/hex_sigsegv.c new file mode 100644 index 0000000000..dc2b349257 --- /dev/null +++ b/tests/tcg/hexagon/hex_sigsegv.c @@ -0,0 +1,106 @@ +/* + * Copyright(c) 2021 Qualcomm Innovation Center, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ + +/* + * Test the VLIW semantics of two stores in a packet + * + * When a packet has 2 stores, either both commit or neither commit. + * We test this with a packet that does stores to both NULL and a global + * variable, "should_not_change". After the SIGSEGV is caught, we check + * that the "should_not_change" value is the same. 
+ */ + +#include +#include +#include +#include +#include +#include +#include + +typedef unsigned char uint8_t; + +int err; +int segv_caught; + +#define SHOULD_NOT_CHANGE_VAL 5 +int should_not_change = SHOULD_NOT_CHANGE_VAL; + +#define BUF_SIZE 300 +unsigned char buf[BUF_SIZE]; + + +static void __check(const char *filename, int line, int x, int expect) +{ + if (x != expect) { + printf("ERROR %s:%d - %d != %d\n", + filename, line, x, expect); + err++; + } +} + +#define check(x, expect) __check(__FILE__, __LINE__, (x), (expect)) + +static void __chk_error(const char *filename, int line, int ret) +{ + if (ret < 0) { + printf("ERROR %s:%d - %d\n", filename, line, ret); + err++; + } +} + +#define chk_error(ret) __chk_error(__FILE__, __LINE__, (ret)) + +jmp_buf jmp_env; + +static void sig_segv(int sig, siginfo_t *info, void *puc) +{ + check(sig, SIGSEGV); + segv_caught = 1; + longjmp(jmp_env, 1); +} + +int main() +{ + struct sigaction act; + + /* SIGSEGV test */ + act.sa_sigaction = sig_segv; + sigemptyset(&act.sa_mask); + act.sa_flags = SA_SIGINFO; + chk_error(sigaction(SIGSEGV, &act, NULL)); + if (setjmp(jmp_env) == 0) { + asm volatile("r18 = ##should_not_change\n\t" + "r19 = #0\n\t" + "{\n\t" + " memw(r18) = #7\n\t" + " memw(r19) = #0\n\t" + "}\n\t" + : : : "r18", "r19", "memory"); + } + + act.sa_handler = SIG_DFL; + sigemptyset(&act.sa_mask); + act.sa_flags = 0; + chk_error(sigaction(SIGSEGV, &act, NULL)); + + check(segv_caught, 1); + check(should_not_change, SHOULD_NOT_CHANGE_VAL); + + puts(err ? "FAIL" : "PASS"); + return err ? EXIT_FAILURE : EXIT_SUCCESS; +} diff --git a/tests/tcg/hexagon/hvx_histogram.c b/tests/tcg/hexagon/hvx_histogram.c new file mode 100644 index 0000000000..43377a9abb --- /dev/null +++ b/tests/tcg/hexagon/hvx_histogram.c @@ -0,0 +1,88 @@ +/* + * Copyright(c) 2021 Qualcomm Innovation Center, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . 
+ */ + +#include +#include +#include +#include "hvx_histogram_row.h" + +const int vector_len = 128; +const int width = 275; +const int height = 20; +const int stride = (width + vector_len - 1) & -vector_len; + +int err; + +static uint8_t input[height][stride] __attribute__((aligned(128))) = { +#include "hvx_histogram_input.h" +}; + +static int result[256] __attribute__((aligned(128))); +static int expect[256] __attribute__((aligned(128))); + +static void check(void) +{ + for (int i = 0; i < 256; i++) { + int res = result[i]; + int exp = expect[i]; + if (res != exp) { + printf("ERROR at %3d: 0x%04x != 0x%04x\n", + i, res, exp); + err++; + } + } +} + +static void ref_histogram(uint8_t *src, int stride, int width, int height, + int *hist) +{ + for (int i = 0; i < 256; i++) { + hist[i] = 0; + } + + for (int i = 0; i < height; i++) { + for (int j = 0; j < width; j++) { + hist[src[i * stride + j]]++; + } + } +} + +static void hvx_histogram(uint8_t *src, int stride, int width, int height, + int *hist) +{ + int n = 8192 / width; + + for (int i = 0; i < 256; i++) { + hist[i] = 0; + } + + for (int i = 0; i < height; i += n) { + int k = height - i > n ? n : height - i; + hvx_histogram_row(src, stride, width, k, hist); + src += n * stride; + } +} + +int main() +{ + ref_histogram(&input[0][0], stride, width, height, expect); + hvx_histogram(&input[0][0], stride, width, height, result); + check(); + + puts(err ? "FAIL" : "PASS"); + return err ? 1 : 0; +} diff --git a/tests/tcg/hexagon/hvx_histogram_input.h b/tests/tcg/hexagon/hvx_histogram_input.h new file mode 100644 index 0000000000..2f9109255e --- /dev/null +++ b/tests/tcg/hexagon/hvx_histogram_input.h @@ -0,0 +1,717 @@ +/* + * Copyright(c) 2021 Qualcomm Innovation Center, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . 
+ */ + + { 0x26, 0x32, 0x2e, 0x2e, 0x2d, 0x2c, 0x2d, 0x2d, + 0x2c, 0x2e, 0x31, 0x33, 0x36, 0x39, 0x3b, 0x3f, + 0x42, 0x46, 0x4a, 0x4c, 0x51, 0x53, 0x53, 0x54, + 0x56, 0x57, 0x58, 0x57, 0x56, 0x52, 0x51, 0x4f, + 0x4c, 0x49, 0x47, 0x42, 0x3e, 0x3b, 0x38, 0x35, + 0x33, 0x30, 0x2e, 0x2c, 0x2b, 0x2a, 0x2a, 0x28, + 0x28, 0x27, 0x27, 0x28, 0x29, 0x2a, 0x2c, 0x2e, + 0x2f, 0x33, 0x36, 0x38, 0x3c, 0x3d, 0x40, 0x42, + 0x43, 0x42, 0x43, 0x44, 0x43, 0x41, 0x40, 0x3b, + 0x3b, 0x3a, 0x38, 0x35, 0x32, 0x2f, 0x2c, 0x29, + 0x27, 0x26, 0x23, 0x21, 0x1e, 0x1c, 0x1a, 0x19, + 0x17, 0x15, 0x15, 0x14, 0x13, 0x12, 0x11, 0x10, + 0x0f, 0x0e, 0x0f, 0x0f, 0x0e, 0x0d, 0x0d, 0x0d, + 0x0c, 0x0d, 0x0e, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, + 0x0c, 0x0c, 0x0d, 0x0c, 0x0f, 0x0e, 0x0f, 0x0f, + 0x0f, 0x10, 0x11, 0x12, 0x14, 0x16, 0x17, 0x19, + 0x1c, 0x1d, 0x21, 0x25, 0x27, 0x29, 0x2b, 0x2f, + 0x31, 0x33, 0x36, 0x38, 0x39, 0x3a, 0x3b, 0x3c, + 0x3c, 0x3d, 0x3e, 0x3e, 0x3c, 0x3b, 0x3a, 0x39, + 0x39, 0x3a, 0x3a, 0x3a, 0x3a, 0x3c, 0x3e, 0x43, + 0x47, 0x4a, 0x4d, 0x51, 0x51, 0x54, 0x56, 0x56, + 0x57, 0x56, 0x53, 0x4f, 0x4b, 0x47, 0x43, 0x41, + 0x3e, 0x3c, 0x3a, 0x37, 0x36, 0x33, 0x32, 0x34, + 0x34, 0x34, 0x34, 0x35, 0x36, 0x39, 0x3d, 0x3d, + 0x3f, 0x40, 0x40, 0x40, 0x40, 0x3e, 0x40, 0x40, + 0x42, 0x44, 0x47, 0x48, 0x4b, 0x4e, 0x56, 0x5c, + 0x62, 0x68, 0x6f, 0x73, 0x76, 0x79, 0x7a, 0x7c, + 0x7e, 0x7c, 0x78, 0x72, 0x6e, 0x69, 0x65, 0x60, + 0x5b, 0x56, 0x52, 0x4d, 0x4a, 0x48, 0x47, 0x46, + 0x44, 0x43, 0x42, 0x41, 0x41, 0x41, 0x40, 0x40, + 0x3f, 0x3e, 0x3d, 0x3c, 0x3b, 0x3b, 0x38, 0x37, + 0x36, 0x35, 0x36, 0x35, 0x36, 0x37, 0x38, 0x3c, + 0x3d, 0x3f, 0x42, 0x44, 0x46, 0x48, 0x4b, 0x4c, + 0x4e, 0x4e, 0x4d, 0x4c, 0x4a, 0x48, 0x49, 0x49, + 0x4b, 0x4d, 0x4e, }, + { 0x23, 0x2d, 0x29, 0x29, 0x28, 0x28, 0x29, 0x29, + 0x28, 0x2b, 0x2d, 0x2f, 0x32, 0x34, 0x36, 0x3a, + 0x3d, 0x41, 0x44, 0x47, 0x4a, 0x4c, 0x4e, 0x4e, + 0x50, 0x51, 0x51, 0x51, 0x4f, 0x4c, 0x4b, 0x48, + 0x46, 0x44, 0x40, 0x3d, 0x39, 0x36, 0x34, 0x30, + 0x2f, 0x2d, 0x2a, 0x29, 0x28, 0x27, 0x26, 0x25, + 0x25, 0x24, 0x24, 0x24, 0x26, 0x28, 0x28, 0x2a, + 0x2b, 0x2e, 0x32, 0x34, 0x37, 0x39, 0x3b, 0x3c, + 0x3d, 0x3d, 0x3e, 0x3e, 0x3e, 0x3c, 0x3b, 0x38, + 0x37, 0x35, 0x33, 0x30, 0x2e, 0x2b, 0x27, 0x25, + 0x24, 0x21, 0x20, 0x1d, 0x1b, 0x1a, 0x18, 0x16, + 0x15, 0x14, 0x13, 0x12, 0x10, 0x11, 0x10, 0x0e, + 0x0e, 0x0d, 0x0d, 0x0d, 0x0d, 0x0c, 0x0c, 0x0b, + 0x0b, 0x0b, 0x0c, 0x0b, 0x0b, 0x09, 0x0a, 0x0b, + 0x0b, 0x0a, 0x0a, 0x0c, 0x0c, 0x0c, 0x0d, 0x0e, + 0x0e, 0x0f, 0x0f, 0x11, 0x12, 0x15, 0x15, 0x17, + 0x1a, 0x1c, 0x1f, 0x22, 0x25, 0x26, 0x29, 0x2a, + 0x2d, 0x30, 0x33, 0x34, 0x35, 0x35, 0x37, 0x37, + 0x39, 0x3a, 0x39, 0x38, 0x37, 0x36, 0x36, 0x37, + 0x35, 0x36, 0x35, 0x35, 0x36, 0x37, 0x3a, 0x3e, + 0x40, 0x43, 0x48, 0x49, 0x4b, 0x4c, 0x4d, 0x4e, + 0x4f, 0x4f, 0x4c, 0x48, 0x45, 0x41, 0x3e, 0x3b, + 0x3a, 0x37, 0x36, 0x33, 0x32, 0x31, 0x30, 0x31, + 0x32, 0x31, 0x31, 0x31, 0x31, 0x34, 0x37, 0x38, + 0x3a, 0x3b, 0x3b, 0x3b, 0x3c, 0x3b, 0x3d, 0x3e, + 0x3f, 0x40, 0x43, 0x44, 0x47, 0x4b, 0x4f, 0x56, + 0x5a, 0x60, 0x66, 0x69, 0x6a, 0x6e, 0x71, 0x72, + 0x73, 0x72, 0x6d, 0x69, 0x66, 0x60, 0x5c, 0x59, + 0x54, 0x50, 0x4d, 0x48, 0x46, 0x44, 0x44, 0x43, + 0x42, 0x41, 0x41, 0x40, 0x3f, 0x3f, 0x3e, 0x3d, + 0x3d, 0x3d, 0x3c, 0x3a, 0x39, 0x38, 0x35, 0x35, + 0x34, 0x34, 0x35, 0x34, 0x35, 0x36, 0x39, 0x3c, + 0x3d, 0x3e, 0x41, 0x43, 0x44, 0x46, 0x48, 0x49, + 0x4a, 0x49, 0x48, 0x47, 0x45, 0x43, 0x43, 0x44, + 0x45, 0x47, 0x48, }, + { 0x23, 0x2d, 0x2a, 0x2a, 0x29, 0x29, 0x2a, 0x2a, + 0x29, 0x2c, 0x2d, 0x2f, 0x32, 0x34, 0x36, 
0x3a, + 0x3d, 0x40, 0x44, 0x48, 0x4a, 0x4c, 0x4e, 0x4e, + 0x50, 0x51, 0x51, 0x51, 0x4f, 0x4c, 0x4b, 0x48, + 0x46, 0x44, 0x40, 0x3d, 0x39, 0x36, 0x34, 0x30, + 0x2f, 0x2d, 0x2a, 0x29, 0x28, 0x27, 0x26, 0x25, + 0x25, 0x24, 0x24, 0x25, 0x26, 0x28, 0x29, 0x2a, + 0x2b, 0x2e, 0x31, 0x34, 0x37, 0x39, 0x3b, 0x3c, + 0x3d, 0x3e, 0x3e, 0x3d, 0x3e, 0x3c, 0x3c, 0x3a, + 0x37, 0x35, 0x33, 0x30, 0x2f, 0x2b, 0x28, 0x26, + 0x24, 0x21, 0x20, 0x1e, 0x1c, 0x1b, 0x18, 0x17, + 0x16, 0x14, 0x13, 0x12, 0x10, 0x10, 0x0f, 0x0e, + 0x0f, 0x0e, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0c, + 0x0b, 0x0b, 0x0c, 0x0c, 0x0c, 0x0b, 0x0b, 0x0c, + 0x0c, 0x0b, 0x0b, 0x0c, 0x0d, 0x0c, 0x0e, 0x0e, + 0x0e, 0x0f, 0x11, 0x11, 0x13, 0x14, 0x16, 0x18, + 0x1a, 0x1d, 0x1f, 0x22, 0x25, 0x26, 0x29, 0x2b, + 0x2d, 0x31, 0x33, 0x34, 0x36, 0x37, 0x38, 0x38, + 0x39, 0x3a, 0x39, 0x38, 0x37, 0x36, 0x37, 0x37, + 0x35, 0x36, 0x35, 0x36, 0x35, 0x38, 0x3a, 0x3e, + 0x40, 0x41, 0x45, 0x47, 0x49, 0x4a, 0x4c, 0x4d, + 0x4e, 0x4d, 0x4a, 0x47, 0x44, 0x40, 0x3d, 0x3b, + 0x39, 0x37, 0x34, 0x34, 0x32, 0x31, 0x31, 0x33, + 0x32, 0x31, 0x32, 0x33, 0x32, 0x36, 0x38, 0x39, + 0x3b, 0x3c, 0x3c, 0x3c, 0x3d, 0x3d, 0x3e, 0x3e, + 0x41, 0x42, 0x43, 0x45, 0x48, 0x4c, 0x50, 0x56, + 0x5b, 0x5f, 0x62, 0x67, 0x69, 0x6c, 0x6e, 0x6e, + 0x70, 0x6f, 0x6b, 0x67, 0x63, 0x5e, 0x5b, 0x58, + 0x54, 0x51, 0x4e, 0x4a, 0x48, 0x46, 0x46, 0x46, + 0x45, 0x46, 0x44, 0x43, 0x44, 0x43, 0x42, 0x42, + 0x41, 0x40, 0x3f, 0x3e, 0x3c, 0x3b, 0x3a, 0x39, + 0x39, 0x39, 0x38, 0x37, 0x37, 0x3a, 0x3e, 0x40, + 0x42, 0x43, 0x47, 0x47, 0x48, 0x4a, 0x4b, 0x4c, + 0x4c, 0x4b, 0x4a, 0x48, 0x46, 0x44, 0x43, 0x45, + 0x45, 0x46, 0x47, }, + { 0x21, 0x2b, 0x28, 0x28, 0x28, 0x28, 0x29, 0x29, + 0x28, 0x2a, 0x2d, 0x30, 0x32, 0x34, 0x37, 0x3a, + 0x3c, 0x40, 0x44, 0x48, 0x4a, 0x4c, 0x4e, 0x4e, + 0x50, 0x51, 0x52, 0x51, 0x4f, 0x4b, 0x4b, 0x48, + 0x45, 0x43, 0x3f, 0x3c, 0x39, 0x36, 0x33, 0x30, + 0x2f, 0x2d, 0x2b, 0x2a, 0x28, 0x27, 0x26, 0x25, + 0x24, 0x24, 0x24, 0x25, 0x27, 0x27, 0x29, 0x2a, + 0x2c, 0x2d, 0x31, 0x34, 0x37, 0x39, 0x3b, 0x3c, + 0x3d, 0x3e, 0x3e, 0x3e, 0x3e, 0x3d, 0x3c, 0x3a, + 0x37, 0x35, 0x33, 0x30, 0x2f, 0x2b, 0x28, 0x26, + 0x25, 0x21, 0x20, 0x1e, 0x1c, 0x19, 0x19, 0x18, + 0x17, 0x15, 0x15, 0x12, 0x11, 0x11, 0x11, 0x0f, + 0x0e, 0x0e, 0x0e, 0x0e, 0x0d, 0x0d, 0x0d, 0x0c, + 0x0c, 0x0c, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, + 0x0c, 0x0c, 0x0c, 0x0c, 0x0e, 0x0e, 0x0f, 0x0f, + 0x0f, 0x10, 0x11, 0x13, 0x13, 0x15, 0x16, 0x18, + 0x1a, 0x1c, 0x1f, 0x22, 0x25, 0x28, 0x29, 0x2d, + 0x2f, 0x32, 0x34, 0x35, 0x36, 0x37, 0x38, 0x38, + 0x39, 0x3a, 0x39, 0x39, 0x37, 0x36, 0x37, 0x36, + 0x35, 0x35, 0x37, 0x35, 0x36, 0x37, 0x3a, 0x3d, + 0x3e, 0x41, 0x43, 0x46, 0x46, 0x47, 0x48, 0x49, + 0x4a, 0x49, 0x47, 0x45, 0x42, 0x3f, 0x3d, 0x3b, + 0x3a, 0x38, 0x36, 0x34, 0x32, 0x32, 0x32, 0x32, + 0x32, 0x31, 0x33, 0x32, 0x34, 0x37, 0x38, 0x38, + 0x3a, 0x3b, 0x3d, 0x3d, 0x3d, 0x3e, 0x3f, 0x41, + 0x42, 0x44, 0x44, 0x46, 0x49, 0x4d, 0x50, 0x54, + 0x58, 0x5c, 0x61, 0x63, 0x65, 0x69, 0x6a, 0x6c, + 0x6d, 0x6c, 0x68, 0x64, 0x61, 0x5c, 0x59, 0x57, + 0x53, 0x51, 0x4f, 0x4c, 0x4a, 0x48, 0x48, 0x49, + 0x49, 0x48, 0x48, 0x48, 0x47, 0x47, 0x46, 0x46, + 0x45, 0x44, 0x42, 0x41, 0x3f, 0x3e, 0x3c, 0x3c, + 0x3c, 0x3d, 0x3c, 0x3c, 0x3c, 0x3e, 0x41, 0x43, + 0x46, 0x48, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4e, + 0x4e, 0x4d, 0x4b, 0x49, 0x47, 0x44, 0x44, 0x45, + 0x45, 0x45, 0x46, }, + { 0x22, 0x2b, 0x27, 0x27, 0x27, 0x27, 0x28, 0x28, + 0x28, 0x2a, 0x2c, 0x2f, 0x30, 0x34, 0x37, 0x3b, + 0x3d, 0x41, 0x45, 0x48, 0x4a, 0x4c, 0x4e, 0x4e, + 0x50, 0x51, 0x52, 0x51, 0x4f, 0x4b, 0x4b, 
0x47, + 0x45, 0x43, 0x3f, 0x3c, 0x39, 0x36, 0x33, 0x30, + 0x2f, 0x2d, 0x2b, 0x2a, 0x27, 0x26, 0x25, 0x24, + 0x23, 0x24, 0x24, 0x25, 0x27, 0x27, 0x29, 0x2a, + 0x2c, 0x2e, 0x31, 0x34, 0x37, 0x39, 0x3a, 0x3b, + 0x3d, 0x3e, 0x3e, 0x3f, 0x3f, 0x3d, 0x3c, 0x3a, + 0x38, 0x36, 0x34, 0x31, 0x2e, 0x2c, 0x29, 0x26, + 0x25, 0x22, 0x20, 0x1e, 0x1c, 0x1a, 0x19, 0x18, + 0x16, 0x15, 0x14, 0x12, 0x10, 0x11, 0x11, 0x0f, + 0x0e, 0x0e, 0x0e, 0x0e, 0x0d, 0x0c, 0x0d, 0x0c, + 0x0c, 0x0c, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, + 0x0c, 0x0c, 0x0c, 0x0d, 0x0d, 0x0e, 0x0f, 0x0f, + 0x0f, 0x10, 0x11, 0x13, 0x13, 0x15, 0x15, 0x18, + 0x19, 0x1d, 0x1f, 0x21, 0x24, 0x27, 0x2a, 0x2c, + 0x30, 0x33, 0x35, 0x36, 0x37, 0x38, 0x39, 0x39, + 0x3a, 0x3a, 0x39, 0x39, 0x37, 0x36, 0x37, 0x36, + 0x36, 0x36, 0x36, 0x36, 0x36, 0x37, 0x39, 0x3a, + 0x3d, 0x3e, 0x41, 0x43, 0x43, 0x45, 0x46, 0x46, + 0x47, 0x46, 0x44, 0x42, 0x40, 0x3d, 0x3a, 0x39, + 0x37, 0x36, 0x35, 0x34, 0x33, 0x32, 0x32, 0x32, + 0x32, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, + 0x39, 0x3c, 0x3c, 0x3e, 0x3e, 0x3e, 0x41, 0x43, + 0x44, 0x45, 0x46, 0x48, 0x49, 0x4c, 0x51, 0x54, + 0x56, 0x5a, 0x5f, 0x61, 0x63, 0x65, 0x67, 0x69, + 0x6a, 0x69, 0x67, 0x61, 0x5f, 0x5b, 0x58, 0x56, + 0x54, 0x51, 0x50, 0x4e, 0x4c, 0x4a, 0x4b, 0x4c, + 0x4c, 0x4b, 0x4b, 0x4b, 0x4b, 0x49, 0x4a, 0x49, + 0x49, 0x48, 0x46, 0x44, 0x42, 0x41, 0x40, 0x3f, + 0x3f, 0x40, 0x40, 0x40, 0x40, 0x42, 0x46, 0x49, + 0x4b, 0x4c, 0x4f, 0x4f, 0x50, 0x52, 0x51, 0x51, + 0x50, 0x4f, 0x4c, 0x4a, 0x48, 0x46, 0x45, 0x44, + 0x44, 0x45, 0x46, }, + { 0x21, 0x2a, 0x27, 0x27, 0x27, 0x27, 0x27, 0x27, + 0x27, 0x29, 0x2d, 0x2f, 0x31, 0x34, 0x37, 0x3b, + 0x3e, 0x41, 0x45, 0x48, 0x4a, 0x4c, 0x4e, 0x4e, + 0x50, 0x51, 0x52, 0x51, 0x4f, 0x4b, 0x4b, 0x48, + 0x45, 0x43, 0x3f, 0x3c, 0x39, 0x36, 0x33, 0x2f, + 0x2f, 0x2d, 0x2a, 0x2a, 0x27, 0x26, 0x25, 0x24, + 0x22, 0x24, 0x24, 0x25, 0x27, 0x27, 0x29, 0x2a, + 0x2c, 0x2f, 0x31, 0x34, 0x37, 0x39, 0x3a, 0x3c, + 0x3d, 0x3e, 0x3f, 0x40, 0x3f, 0x3d, 0x3d, 0x3a, + 0x38, 0x36, 0x34, 0x31, 0x2e, 0x2c, 0x29, 0x26, + 0x25, 0x22, 0x21, 0x1f, 0x1d, 0x1b, 0x19, 0x18, + 0x16, 0x14, 0x14, 0x13, 0x11, 0x11, 0x11, 0x0f, + 0x0f, 0x0f, 0x0e, 0x0e, 0x0d, 0x0d, 0x0d, 0x0d, + 0x0d, 0x0d, 0x0c, 0x0b, 0x0b, 0x0b, 0x0b, 0x0c, + 0x0c, 0x0d, 0x0d, 0x0d, 0x0e, 0x0e, 0x0f, 0x0f, + 0x0f, 0x10, 0x13, 0x13, 0x14, 0x15, 0x17, 0x19, + 0x1a, 0x1d, 0x1f, 0x22, 0x25, 0x27, 0x2a, 0x2e, + 0x31, 0x33, 0x35, 0x38, 0x39, 0x3a, 0x3b, 0x3b, + 0x3c, 0x3c, 0x3b, 0x3a, 0x39, 0x38, 0x38, 0x37, + 0x36, 0x36, 0x37, 0x36, 0x37, 0x38, 0x38, 0x3a, + 0x3b, 0x3e, 0x40, 0x40, 0x41, 0x42, 0x43, 0x42, + 0x43, 0x42, 0x40, 0x40, 0x3f, 0x3c, 0x3b, 0x39, + 0x38, 0x37, 0x36, 0x35, 0x34, 0x33, 0x32, 0x33, + 0x32, 0x32, 0x34, 0x35, 0x35, 0x36, 0x39, 0x39, + 0x3a, 0x3c, 0x3c, 0x3f, 0x40, 0x41, 0x43, 0x45, + 0x45, 0x47, 0x48, 0x4a, 0x4b, 0x4d, 0x50, 0x53, + 0x56, 0x59, 0x5c, 0x5f, 0x60, 0x65, 0x64, 0x66, + 0x68, 0x66, 0x64, 0x61, 0x5e, 0x5a, 0x59, 0x56, + 0x54, 0x52, 0x51, 0x50, 0x4e, 0x4c, 0x4d, 0x4f, + 0x4f, 0x4f, 0x50, 0x50, 0x4f, 0x4f, 0x4e, 0x4d, + 0x4c, 0x4b, 0x49, 0x47, 0x45, 0x44, 0x43, 0x43, + 0x42, 0x43, 0x44, 0x44, 0x46, 0x47, 0x49, 0x4d, + 0x4f, 0x51, 0x53, 0x54, 0x53, 0x54, 0x54, 0x53, + 0x53, 0x51, 0x4e, 0x4b, 0x4a, 0x47, 0x45, 0x44, + 0x44, 0x45, 0x46, }, + { 0x20, 0x28, 0x26, 0x26, 0x25, 0x24, 0x27, 0x27, + 0x27, 0x29, 0x2c, 0x2e, 0x31, 0x34, 0x37, 0x3b, + 0x3e, 0x41, 0x45, 0x48, 0x4a, 0x4c, 0x4e, 0x4e, + 0x50, 0x51, 0x52, 0x51, 0x4f, 0x4b, 0x4a, 0x49, + 0x45, 0x43, 0x3f, 0x3c, 0x3a, 0x36, 0x33, 0x30, + 0x2f, 0x2d, 0x2a, 0x28, 0x27, 0x26, 0x25, 
0x24, + 0x23, 0x24, 0x24, 0x25, 0x27, 0x27, 0x29, 0x2a, + 0x2c, 0x2e, 0x31, 0x34, 0x37, 0x39, 0x3b, 0x3c, + 0x3d, 0x3e, 0x3f, 0x40, 0x3e, 0x3d, 0x3d, 0x3a, + 0x38, 0x36, 0x34, 0x31, 0x2f, 0x2c, 0x29, 0x27, + 0x25, 0x21, 0x21, 0x1f, 0x1c, 0x1d, 0x19, 0x18, + 0x16, 0x15, 0x15, 0x13, 0x12, 0x11, 0x11, 0x0f, + 0x0f, 0x0e, 0x0f, 0x0f, 0x0e, 0x0d, 0x0d, 0x0d, + 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, + 0x0d, 0x0d, 0x0d, 0x0e, 0x0e, 0x0e, 0x0f, 0x10, + 0x10, 0x10, 0x12, 0x13, 0x15, 0x16, 0x18, 0x1a, + 0x1c, 0x1d, 0x20, 0x22, 0x25, 0x27, 0x2a, 0x2e, + 0x30, 0x34, 0x38, 0x39, 0x3a, 0x3b, 0x3b, 0x3b, + 0x3c, 0x3d, 0x3c, 0x3b, 0x3a, 0x39, 0x38, 0x37, + 0x36, 0x36, 0x38, 0x37, 0x37, 0x37, 0x38, 0x3a, + 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 0x40, 0x40, 0x40, + 0x42, 0x40, 0x3f, 0x3e, 0x3d, 0x3b, 0x3a, 0x39, + 0x37, 0x36, 0x36, 0x35, 0x34, 0x34, 0x33, 0x33, + 0x33, 0x34, 0x35, 0x35, 0x35, 0x36, 0x38, 0x39, + 0x3a, 0x3b, 0x3d, 0x3f, 0x42, 0x43, 0x45, 0x45, + 0x46, 0x48, 0x49, 0x4b, 0x4b, 0x4d, 0x50, 0x53, + 0x56, 0x57, 0x5a, 0x5c, 0x5e, 0x61, 0x63, 0x65, + 0x66, 0x64, 0x62, 0x5f, 0x5c, 0x59, 0x58, 0x56, + 0x55, 0x54, 0x52, 0x51, 0x50, 0x51, 0x51, 0x52, + 0x52, 0x52, 0x52, 0x52, 0x51, 0x51, 0x51, 0x50, + 0x4f, 0x4e, 0x4c, 0x4a, 0x47, 0x46, 0x45, 0x45, + 0x45, 0x46, 0x46, 0x46, 0x4a, 0x4c, 0x4d, 0x52, + 0x54, 0x56, 0x58, 0x58, 0x56, 0x57, 0x57, 0x56, + 0x55, 0x53, 0x50, 0x4d, 0x49, 0x45, 0x44, 0x44, + 0x43, 0x44, 0x45, }, + { 0x1f, 0x27, 0x24, 0x23, 0x25, 0x24, 0x25, 0x26, + 0x26, 0x28, 0x2b, 0x2e, 0x31, 0x34, 0x37, 0x3a, + 0x3d, 0x41, 0x45, 0x48, 0x4b, 0x4d, 0x4f, 0x4e, + 0x50, 0x51, 0x52, 0x50, 0x4f, 0x4b, 0x4a, 0x49, + 0x45, 0x43, 0x3f, 0x3c, 0x3a, 0x36, 0x33, 0x30, + 0x2f, 0x2d, 0x29, 0x28, 0x27, 0x26, 0x25, 0x24, + 0x23, 0x25, 0x24, 0x25, 0x27, 0x27, 0x29, 0x2a, + 0x2c, 0x2f, 0x32, 0x34, 0x37, 0x39, 0x3b, 0x3c, + 0x3e, 0x3f, 0x3f, 0x40, 0x3e, 0x3d, 0x3c, 0x3a, + 0x38, 0x36, 0x34, 0x31, 0x30, 0x2c, 0x29, 0x28, + 0x25, 0x23, 0x22, 0x1f, 0x1c, 0x1c, 0x18, 0x18, + 0x16, 0x14, 0x14, 0x13, 0x11, 0x11, 0x11, 0x0f, + 0x0f, 0x0e, 0x0f, 0x0f, 0x0e, 0x0d, 0x0d, 0x0d, + 0x0c, 0x0c, 0x0b, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, + 0x0d, 0x0e, 0x0e, 0x0f, 0x0d, 0x0f, 0x10, 0x10, + 0x10, 0x11, 0x13, 0x14, 0x15, 0x16, 0x19, 0x1a, + 0x1c, 0x1f, 0x20, 0x23, 0x26, 0x28, 0x2a, 0x2e, + 0x31, 0x35, 0x38, 0x39, 0x3a, 0x3c, 0x3d, 0x3d, + 0x3e, 0x3e, 0x3d, 0x3c, 0x3a, 0x3a, 0x39, 0x39, + 0x38, 0x37, 0x38, 0x38, 0x37, 0x38, 0x39, 0x3a, + 0x3c, 0x3c, 0x3d, 0x3e, 0x3f, 0x3f, 0x40, 0x3f, + 0x41, 0x40, 0x3e, 0x3e, 0x3d, 0x3b, 0x3b, 0x39, + 0x37, 0x37, 0x35, 0x36, 0x34, 0x34, 0x34, 0x35, + 0x35, 0x34, 0x34, 0x35, 0x35, 0x37, 0x38, 0x39, + 0x3a, 0x3c, 0x3f, 0x3f, 0x43, 0x43, 0x45, 0x47, + 0x48, 0x48, 0x4a, 0x4b, 0x4e, 0x4d, 0x51, 0x53, + 0x56, 0x58, 0x59, 0x5b, 0x5d, 0x60, 0x62, 0x63, + 0x64, 0x63, 0x61, 0x5e, 0x5c, 0x5a, 0x57, 0x56, + 0x55, 0x54, 0x53, 0x52, 0x51, 0x51, 0x52, 0x52, + 0x54, 0x54, 0x55, 0x55, 0x55, 0x54, 0x54, 0x53, + 0x52, 0x50, 0x4e, 0x4d, 0x4b, 0x4a, 0x48, 0x48, + 0x48, 0x48, 0x4a, 0x4b, 0x4d, 0x4f, 0x52, 0x55, + 0x58, 0x5a, 0x5b, 0x5b, 0x5b, 0x5b, 0x5a, 0x59, + 0x58, 0x55, 0x51, 0x4e, 0x4a, 0x46, 0x45, 0x44, + 0x44, 0x44, 0x44, }, + { 0x1e, 0x26, 0x23, 0x23, 0x25, 0x24, 0x25, 0x26, + 0x26, 0x28, 0x2b, 0x2e, 0x31, 0x34, 0x37, 0x3a, + 0x3e, 0x42, 0x45, 0x48, 0x4b, 0x4d, 0x4f, 0x4f, + 0x50, 0x51, 0x52, 0x50, 0x4f, 0x4b, 0x4a, 0x48, + 0x46, 0x44, 0x3f, 0x3b, 0x39, 0x36, 0x33, 0x30, + 0x2f, 0x2d, 0x2a, 0x28, 0x27, 0x26, 0x25, 0x24, + 0x23, 0x24, 0x24, 0x25, 0x27, 0x27, 0x29, 0x2a, + 0x2c, 0x2f, 0x32, 0x34, 0x37, 0x39, 0x3b, 
0x3d, + 0x3e, 0x3f, 0x41, 0x41, 0x40, 0x3e, 0x3d, 0x3b, + 0x38, 0x37, 0x34, 0x32, 0x30, 0x2c, 0x2a, 0x27, + 0x26, 0x23, 0x22, 0x20, 0x1d, 0x1b, 0x1a, 0x19, + 0x17, 0x15, 0x15, 0x13, 0x12, 0x12, 0x11, 0x0f, + 0x11, 0x0f, 0x0e, 0x0e, 0x0d, 0x0d, 0x0d, 0x0c, + 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, + 0x0e, 0x0e, 0x0e, 0x0f, 0x10, 0x10, 0x11, 0x11, + 0x11, 0x13, 0x16, 0x15, 0x15, 0x18, 0x1a, 0x1b, + 0x1d, 0x20, 0x22, 0x24, 0x27, 0x29, 0x2c, 0x30, + 0x33, 0x37, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3e, + 0x40, 0x40, 0x40, 0x3f, 0x3e, 0x3d, 0x3c, 0x3a, + 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3a, 0x3b, 0x3d, + 0x3d, 0x3f, 0x40, 0x40, 0x3f, 0x41, 0x41, 0x41, + 0x41, 0x41, 0x40, 0x40, 0x3f, 0x3e, 0x3c, 0x3b, + 0x3a, 0x39, 0x37, 0x36, 0x36, 0x35, 0x35, 0x36, + 0x36, 0x35, 0x35, 0x36, 0x36, 0x38, 0x39, 0x39, + 0x3b, 0x3c, 0x3e, 0x40, 0x41, 0x43, 0x45, 0x47, + 0x48, 0x48, 0x4b, 0x4c, 0x4d, 0x4f, 0x51, 0x53, + 0x56, 0x56, 0x59, 0x5b, 0x5d, 0x5f, 0x61, 0x62, + 0x63, 0x63, 0x61, 0x5e, 0x5c, 0x5a, 0x59, 0x57, + 0x56, 0x54, 0x54, 0x53, 0x52, 0x53, 0x53, 0x55, + 0x56, 0x56, 0x57, 0x57, 0x57, 0x57, 0x56, 0x56, + 0x55, 0x53, 0x51, 0x4f, 0x4d, 0x4b, 0x49, 0x4b, + 0x4b, 0x4c, 0x4d, 0x4e, 0x51, 0x53, 0x55, 0x58, + 0x5b, 0x5c, 0x60, 0x60, 0x5f, 0x5e, 0x5d, 0x5c, + 0x5a, 0x57, 0x53, 0x4f, 0x4b, 0x46, 0x45, 0x44, + 0x44, 0x44, 0x44, }, + { 0x1d, 0x25, 0x22, 0x22, 0x23, 0x23, 0x24, 0x25, + 0x25, 0x28, 0x2b, 0x2e, 0x31, 0x34, 0x37, 0x3a, + 0x3e, 0x42, 0x45, 0x48, 0x4b, 0x4d, 0x4f, 0x4f, + 0x50, 0x51, 0x52, 0x50, 0x4f, 0x4b, 0x4a, 0x47, + 0x45, 0x43, 0x3f, 0x3c, 0x38, 0x35, 0x33, 0x30, + 0x2f, 0x2d, 0x2a, 0x28, 0x27, 0x26, 0x25, 0x24, + 0x23, 0x24, 0x24, 0x25, 0x27, 0x27, 0x29, 0x2a, + 0x2b, 0x2f, 0x32, 0x34, 0x37, 0x39, 0x3c, 0x3d, + 0x3e, 0x3f, 0x40, 0x41, 0x40, 0x3e, 0x3d, 0x3b, + 0x39, 0x36, 0x34, 0x32, 0x30, 0x2d, 0x2a, 0x26, + 0x26, 0x24, 0x22, 0x1f, 0x1d, 0x1c, 0x1a, 0x19, + 0x18, 0x16, 0x15, 0x14, 0x12, 0x12, 0x12, 0x10, + 0x10, 0x0f, 0x0e, 0x10, 0x0e, 0x0e, 0x0d, 0x0c, + 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0e, 0x0d, 0x0e, + 0x0f, 0x0f, 0x0f, 0x10, 0x11, 0x11, 0x11, 0x12, + 0x13, 0x14, 0x16, 0x16, 0x18, 0x1a, 0x1b, 0x1c, + 0x1e, 0x21, 0x23, 0x25, 0x28, 0x2a, 0x2e, 0x32, + 0x34, 0x38, 0x3a, 0x3c, 0x3d, 0x3f, 0x40, 0x42, + 0x43, 0x43, 0x43, 0x42, 0x40, 0x3e, 0x3e, 0x3c, + 0x3b, 0x3b, 0x3c, 0x3a, 0x3b, 0x3b, 0x3e, 0x3e, + 0x40, 0x3f, 0x41, 0x41, 0x41, 0x42, 0x42, 0x43, + 0x42, 0x41, 0x41, 0x41, 0x40, 0x3e, 0x3d, 0x3c, + 0x3b, 0x3a, 0x39, 0x37, 0x36, 0x35, 0x36, 0x37, + 0x35, 0x36, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, + 0x3b, 0x3d, 0x3e, 0x40, 0x41, 0x41, 0x44, 0x46, + 0x48, 0x48, 0x4a, 0x4c, 0x4d, 0x4f, 0x51, 0x53, + 0x55, 0x57, 0x59, 0x5a, 0x5b, 0x5e, 0x5f, 0x61, + 0x62, 0x61, 0x60, 0x5e, 0x5c, 0x5a, 0x59, 0x58, + 0x56, 0x55, 0x54, 0x53, 0x53, 0x54, 0x54, 0x55, + 0x57, 0x57, 0x58, 0x59, 0x5a, 0x58, 0x59, 0x58, + 0x57, 0x55, 0x53, 0x52, 0x4f, 0x4e, 0x4d, 0x4d, + 0x4d, 0x4f, 0x51, 0x50, 0x54, 0x56, 0x59, 0x5c, + 0x5f, 0x61, 0x64, 0x64, 0x63, 0x61, 0x5e, 0x5e, + 0x5c, 0x59, 0x54, 0x50, 0x4c, 0x46, 0x45, 0x44, + 0x44, 0x44, 0x44, }, + { 0x1c, 0x24, 0x21, 0x21, 0x21, 0x22, 0x23, 0x23, + 0x25, 0x27, 0x2a, 0x2e, 0x31, 0x33, 0x37, 0x3b, + 0x3e, 0x42, 0x45, 0x48, 0x4b, 0x4c, 0x50, 0x4f, + 0x50, 0x51, 0x52, 0x50, 0x4e, 0x4b, 0x4a, 0x49, + 0x45, 0x42, 0x3f, 0x3c, 0x38, 0x35, 0x33, 0x30, + 0x2f, 0x2d, 0x2a, 0x28, 0x27, 0x26, 0x25, 0x24, + 0x23, 0x24, 0x24, 0x25, 0x27, 0x27, 0x29, 0x2a, + 0x2b, 0x2f, 0x32, 0x34, 0x38, 0x39, 0x3c, 0x3d, + 0x3e, 0x3e, 0x40, 0x41, 0x40, 0x3e, 0x3c, 0x3a, + 0x39, 0x37, 0x35, 0x33, 0x30, 0x2d, 0x2b, 
0x28, + 0x26, 0x23, 0x23, 0x20, 0x1e, 0x1b, 0x19, 0x19, + 0x17, 0x16, 0x15, 0x14, 0x12, 0x12, 0x11, 0x10, + 0x0f, 0x0e, 0x0e, 0x10, 0x0e, 0x0d, 0x0c, 0x0c, + 0x0c, 0x0d, 0x0d, 0x0d, 0x0d, 0x0e, 0x0d, 0x0e, + 0x0f, 0x0f, 0x0f, 0x10, 0x11, 0x11, 0x12, 0x14, + 0x14, 0x14, 0x16, 0x18, 0x19, 0x1b, 0x1c, 0x1e, + 0x20, 0x23, 0x26, 0x27, 0x29, 0x2c, 0x2f, 0x33, + 0x36, 0x38, 0x3b, 0x3e, 0x3e, 0x42, 0x43, 0x46, + 0x46, 0x46, 0x46, 0x44, 0x42, 0x41, 0x3f, 0x3e, + 0x3d, 0x3d, 0x3e, 0x3d, 0x3d, 0x3e, 0x3e, 0x40, + 0x40, 0x40, 0x43, 0x43, 0x42, 0x43, 0x45, 0x43, + 0x43, 0x43, 0x42, 0x42, 0x41, 0x40, 0x40, 0x3e, + 0x3c, 0x3a, 0x3a, 0x38, 0x36, 0x36, 0x36, 0x36, + 0x37, 0x37, 0x36, 0x38, 0x38, 0x39, 0x3b, 0x3b, + 0x3e, 0x3e, 0x3e, 0x40, 0x41, 0x43, 0x45, 0x46, + 0x46, 0x49, 0x4c, 0x4c, 0x4d, 0x4f, 0x51, 0x54, + 0x56, 0x57, 0x58, 0x5a, 0x5c, 0x5e, 0x60, 0x60, + 0x61, 0x61, 0x60, 0x5f, 0x5c, 0x5a, 0x59, 0x58, + 0x57, 0x57, 0x55, 0x54, 0x53, 0x55, 0x55, 0x58, + 0x58, 0x59, 0x5a, 0x5a, 0x5a, 0x5b, 0x5b, 0x5b, + 0x5a, 0x59, 0x56, 0x54, 0x53, 0x4e, 0x4e, 0x50, + 0x50, 0x51, 0x52, 0x52, 0x57, 0x59, 0x5d, 0x60, + 0x63, 0x63, 0x66, 0x66, 0x66, 0x64, 0x63, 0x61, + 0x60, 0x5b, 0x55, 0x51, 0x4d, 0x48, 0x45, 0x44, + 0x43, 0x43, 0x43, }, + { 0x1b, 0x23, 0x20, 0x21, 0x22, 0x22, 0x23, 0x24, + 0x26, 0x27, 0x2a, 0x2e, 0x31, 0x33, 0x37, 0x3b, + 0x3d, 0x42, 0x46, 0x49, 0x4a, 0x4c, 0x4f, 0x4f, + 0x50, 0x50, 0x52, 0x50, 0x4e, 0x4b, 0x4b, 0x49, + 0x45, 0x42, 0x3e, 0x3c, 0x38, 0x35, 0x33, 0x30, + 0x2f, 0x2d, 0x2a, 0x28, 0x27, 0x26, 0x25, 0x24, + 0x23, 0x24, 0x24, 0x25, 0x27, 0x27, 0x29, 0x2a, + 0x2c, 0x2f, 0x32, 0x35, 0x38, 0x3a, 0x3c, 0x3d, + 0x3e, 0x3e, 0x40, 0x41, 0x40, 0x3f, 0x3d, 0x3b, + 0x3a, 0x38, 0x36, 0x33, 0x30, 0x2d, 0x2b, 0x29, + 0x27, 0x24, 0x24, 0x21, 0x1e, 0x1c, 0x1b, 0x1a, + 0x18, 0x17, 0x16, 0x15, 0x13, 0x12, 0x10, 0x0f, + 0x10, 0x0f, 0x0e, 0x0f, 0x0e, 0x0d, 0x0d, 0x0d, + 0x0d, 0x0d, 0x0e, 0x0e, 0x0e, 0x0f, 0x0e, 0x0f, + 0x10, 0x11, 0x11, 0x12, 0x13, 0x13, 0x14, 0x15, + 0x15, 0x16, 0x17, 0x1a, 0x1b, 0x1d, 0x1e, 0x20, + 0x21, 0x25, 0x27, 0x29, 0x2b, 0x2d, 0x31, 0x35, + 0x37, 0x39, 0x3c, 0x3f, 0x40, 0x43, 0x46, 0x47, + 0x4a, 0x49, 0x48, 0x46, 0x45, 0x43, 0x42, 0x41, + 0x3f, 0x40, 0x3f, 0x3f, 0x40, 0x3f, 0x41, 0x43, + 0x43, 0x43, 0x44, 0x45, 0x45, 0x45, 0x45, 0x45, + 0x45, 0x45, 0x44, 0x43, 0x43, 0x42, 0x42, 0x40, + 0x3e, 0x3d, 0x3c, 0x39, 0x38, 0x38, 0x38, 0x38, + 0x38, 0x36, 0x38, 0x39, 0x39, 0x3a, 0x3c, 0x3d, + 0x3e, 0x3e, 0x3f, 0x41, 0x42, 0x42, 0x43, 0x45, + 0x46, 0x49, 0x4b, 0x4d, 0x4f, 0x50, 0x53, 0x54, + 0x57, 0x58, 0x5a, 0x5c, 0x5b, 0x5e, 0x60, 0x61, + 0x60, 0x60, 0x5f, 0x5f, 0x5d, 0x5b, 0x5b, 0x59, + 0x58, 0x57, 0x56, 0x55, 0x55, 0x55, 0x57, 0x59, + 0x5b, 0x5b, 0x5d, 0x5c, 0x5c, 0x5e, 0x5e, 0x5e, + 0x5d, 0x5b, 0x59, 0x56, 0x54, 0x51, 0x51, 0x51, + 0x52, 0x55, 0x56, 0x56, 0x5a, 0x5d, 0x5f, 0x63, + 0x66, 0x68, 0x6b, 0x6b, 0x68, 0x67, 0x66, 0x64, + 0x61, 0x5d, 0x57, 0x52, 0x4f, 0x49, 0x46, 0x45, + 0x43, 0x43, 0x43, }, + { 0x1a, 0x22, 0x1f, 0x20, 0x21, 0x22, 0x23, 0x24, + 0x26, 0x27, 0x2a, 0x2d, 0x31, 0x33, 0x37, 0x3b, + 0x3d, 0x41, 0x46, 0x49, 0x4a, 0x4d, 0x4f, 0x4f, + 0x50, 0x51, 0x52, 0x50, 0x4e, 0x4b, 0x4b, 0x48, + 0x44, 0x42, 0x3e, 0x3c, 0x39, 0x35, 0x33, 0x30, + 0x2f, 0x2d, 0x2a, 0x28, 0x27, 0x26, 0x25, 0x24, + 0x23, 0x24, 0x24, 0x25, 0x27, 0x27, 0x29, 0x2a, + 0x2d, 0x2f, 0x32, 0x35, 0x39, 0x3a, 0x3c, 0x3d, + 0x3e, 0x3f, 0x40, 0x41, 0x40, 0x3f, 0x3e, 0x3c, + 0x3a, 0x38, 0x36, 0x33, 0x31, 0x2d, 0x2c, 0x29, + 0x27, 0x26, 0x24, 0x21, 0x1f, 0x1d, 0x1c, 0x1a, + 0x19, 0x18, 0x16, 0x15, 0x14, 0x13, 0x12, 
0x10, + 0x11, 0x10, 0x0f, 0x0f, 0x0f, 0x0e, 0x0e, 0x0e, + 0x0f, 0x0f, 0x0e, 0x0e, 0x0e, 0x0f, 0x0f, 0x10, + 0x11, 0x12, 0x12, 0x13, 0x15, 0x15, 0x16, 0x16, + 0x17, 0x18, 0x1a, 0x1b, 0x1c, 0x1e, 0x1f, 0x21, + 0x22, 0x25, 0x27, 0x2a, 0x2c, 0x2e, 0x33, 0x36, + 0x39, 0x3a, 0x3d, 0x40, 0x41, 0x45, 0x47, 0x4a, + 0x4c, 0x4d, 0x4c, 0x4a, 0x48, 0x45, 0x44, 0x41, + 0x42, 0x42, 0x42, 0x42, 0x42, 0x43, 0x43, 0x44, + 0x45, 0x47, 0x47, 0x48, 0x47, 0x48, 0x47, 0x47, + 0x48, 0x48, 0x46, 0x46, 0x46, 0x43, 0x43, 0x41, + 0x3f, 0x3e, 0x3b, 0x39, 0x38, 0x37, 0x37, 0x37, + 0x38, 0x38, 0x37, 0x39, 0x39, 0x3a, 0x3c, 0x3e, + 0x3e, 0x3f, 0x3f, 0x3f, 0x42, 0x43, 0x43, 0x45, + 0x47, 0x48, 0x4b, 0x4c, 0x4e, 0x50, 0x51, 0x54, + 0x56, 0x58, 0x5a, 0x5c, 0x5c, 0x5f, 0x5f, 0x5f, + 0x61, 0x60, 0x5f, 0x5f, 0x5e, 0x5b, 0x5c, 0x5b, + 0x59, 0x59, 0x57, 0x56, 0x55, 0x56, 0x57, 0x59, + 0x5a, 0x5b, 0x5c, 0x5c, 0x5d, 0x5e, 0x5e, 0x5d, + 0x5e, 0x5c, 0x5a, 0x57, 0x55, 0x52, 0x51, 0x52, + 0x53, 0x55, 0x57, 0x58, 0x5c, 0x5e, 0x61, 0x65, + 0x69, 0x6b, 0x6c, 0x6b, 0x6a, 0x69, 0x67, 0x64, + 0x61, 0x5d, 0x59, 0x53, 0x4d, 0x48, 0x46, 0x45, + 0x44, 0x44, 0x43, }, + { 0x1a, 0x21, 0x1e, 0x1f, 0x20, 0x21, 0x23, 0x24, + 0x25, 0x28, 0x2a, 0x2e, 0x31, 0x33, 0x37, 0x3b, + 0x3e, 0x41, 0x46, 0x49, 0x4b, 0x4d, 0x4f, 0x4e, + 0x50, 0x51, 0x51, 0x50, 0x4e, 0x4b, 0x4a, 0x48, + 0x44, 0x42, 0x3e, 0x3c, 0x39, 0x35, 0x32, 0x30, + 0x2f, 0x2d, 0x29, 0x27, 0x27, 0x26, 0x25, 0x24, + 0x23, 0x24, 0x24, 0x25, 0x26, 0x27, 0x29, 0x2a, + 0x2c, 0x2f, 0x32, 0x35, 0x38, 0x3b, 0x3c, 0x3e, + 0x3f, 0x3f, 0x40, 0x41, 0x40, 0x3f, 0x3e, 0x3c, + 0x3a, 0x39, 0x36, 0x34, 0x31, 0x2d, 0x2c, 0x29, + 0x27, 0x26, 0x24, 0x21, 0x1f, 0x1d, 0x1c, 0x1a, + 0x19, 0x17, 0x16, 0x15, 0x14, 0x13, 0x12, 0x10, + 0x11, 0x10, 0x0f, 0x0f, 0x0f, 0x0e, 0x0e, 0x0e, + 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0f, 0x0f, 0x10, + 0x11, 0x13, 0x14, 0x14, 0x15, 0x16, 0x17, 0x19, + 0x19, 0x1a, 0x1c, 0x1d, 0x1e, 0x20, 0x22, 0x24, + 0x25, 0x27, 0x29, 0x2c, 0x2e, 0x31, 0x35, 0x38, + 0x3a, 0x3d, 0x41, 0x42, 0x45, 0x48, 0x4c, 0x4e, + 0x4f, 0x4f, 0x4f, 0x4d, 0x4b, 0x49, 0x47, 0x47, + 0x46, 0x45, 0x45, 0x45, 0x44, 0x44, 0x46, 0x47, + 0x48, 0x49, 0x4b, 0x4b, 0x4a, 0x4b, 0x4b, 0x4a, + 0x4b, 0x4a, 0x49, 0x49, 0x48, 0x46, 0x46, 0x44, + 0x42, 0x41, 0x3d, 0x3b, 0x3a, 0x38, 0x38, 0x38, + 0x37, 0x37, 0x39, 0x38, 0x3a, 0x3a, 0x3c, 0x3c, + 0x3e, 0x40, 0x40, 0x41, 0x43, 0x43, 0x45, 0x46, + 0x48, 0x49, 0x4b, 0x4e, 0x4f, 0x50, 0x53, 0x55, + 0x57, 0x59, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, 0x60, + 0x60, 0x60, 0x5f, 0x5f, 0x5e, 0x5c, 0x5b, 0x5a, + 0x59, 0x58, 0x57, 0x57, 0x56, 0x56, 0x57, 0x58, + 0x59, 0x5a, 0x5b, 0x5c, 0x5c, 0x5d, 0x5e, 0x5d, + 0x5c, 0x5b, 0x58, 0x57, 0x54, 0x52, 0x52, 0x53, + 0x54, 0x57, 0x58, 0x58, 0x5b, 0x5e, 0x62, 0x65, + 0x69, 0x6b, 0x6d, 0x6c, 0x6a, 0x69, 0x67, 0x64, + 0x62, 0x5e, 0x59, 0x54, 0x4d, 0x48, 0x47, 0x46, + 0x45, 0x45, 0x44, }, + { 0x1a, 0x21, 0x1e, 0x1f, 0x20, 0x21, 0x23, 0x24, + 0x25, 0x28, 0x2a, 0x2e, 0x31, 0x34, 0x37, 0x3b, + 0x3e, 0x42, 0x47, 0x49, 0x4b, 0x4d, 0x4f, 0x4f, + 0x50, 0x51, 0x51, 0x50, 0x50, 0x4c, 0x4a, 0x47, + 0x44, 0x42, 0x3e, 0x3c, 0x39, 0x35, 0x32, 0x31, + 0x2f, 0x2d, 0x29, 0x27, 0x26, 0x26, 0x25, 0x24, + 0x23, 0x24, 0x25, 0x25, 0x26, 0x27, 0x29, 0x2b, + 0x2c, 0x2f, 0x33, 0x35, 0x38, 0x3a, 0x3c, 0x3e, + 0x40, 0x40, 0x41, 0x42, 0x41, 0x3f, 0x3f, 0x3d, + 0x3b, 0x39, 0x36, 0x33, 0x32, 0x2e, 0x2d, 0x2a, + 0x27, 0x26, 0x25, 0x22, 0x1f, 0x1d, 0x1c, 0x1b, + 0x19, 0x17, 0x17, 0x16, 0x15, 0x14, 0x12, 0x11, + 0x11, 0x11, 0x10, 0x10, 0x0f, 0x0f, 0x0f, 0x0f, + 0x0f, 0x0f, 0x10, 0x11, 0x10, 0x11, 0x11, 
0x12, + 0x11, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1b, + 0x1c, 0x1c, 0x1e, 0x20, 0x21, 0x22, 0x23, 0x25, + 0x27, 0x2a, 0x2c, 0x2f, 0x31, 0x35, 0x38, 0x3b, + 0x3d, 0x40, 0x44, 0x47, 0x49, 0x4c, 0x4f, 0x51, + 0x53, 0x53, 0x53, 0x51, 0x50, 0x4e, 0x4c, 0x4b, + 0x4a, 0x49, 0x49, 0x49, 0x49, 0x4a, 0x4a, 0x4d, + 0x4e, 0x4e, 0x4f, 0x50, 0x4f, 0x50, 0x51, 0x50, + 0x50, 0x4e, 0x4d, 0x4c, 0x4b, 0x48, 0x48, 0x47, + 0x44, 0x42, 0x3f, 0x3d, 0x3b, 0x3a, 0x39, 0x39, + 0x39, 0x38, 0x39, 0x3b, 0x3a, 0x3c, 0x3e, 0x3d, + 0x40, 0x40, 0x40, 0x42, 0x42, 0x42, 0x45, 0x46, + 0x47, 0x49, 0x4c, 0x4e, 0x50, 0x50, 0x53, 0x56, + 0x58, 0x59, 0x5d, 0x5d, 0x5e, 0x60, 0x61, 0x61, + 0x62, 0x61, 0x60, 0x60, 0x5e, 0x5d, 0x5d, 0x5b, + 0x57, 0x58, 0x56, 0x55, 0x55, 0x56, 0x56, 0x59, + 0x59, 0x58, 0x5a, 0x5a, 0x5a, 0x5c, 0x5c, 0x5c, + 0x5b, 0x5b, 0x58, 0x57, 0x54, 0x53, 0x52, 0x53, + 0x54, 0x57, 0x58, 0x59, 0x5c, 0x5f, 0x63, 0x67, + 0x6b, 0x6d, 0x6e, 0x6e, 0x6b, 0x6a, 0x68, 0x64, + 0x62, 0x5e, 0x58, 0x53, 0x4f, 0x49, 0x47, 0x46, + 0x45, 0x45, 0x44, }, + { 0x19, 0x20, 0x1e, 0x1e, 0x1f, 0x20, 0x22, 0x23, + 0x25, 0x27, 0x2a, 0x2e, 0x31, 0x34, 0x37, 0x3a, + 0x3e, 0x41, 0x46, 0x49, 0x4a, 0x4d, 0x4f, 0x4e, + 0x50, 0x51, 0x51, 0x4f, 0x4f, 0x4d, 0x49, 0x47, + 0x44, 0x42, 0x3e, 0x3c, 0x39, 0x36, 0x32, 0x31, + 0x2f, 0x2d, 0x29, 0x27, 0x26, 0x26, 0x25, 0x24, + 0x23, 0x24, 0x25, 0x25, 0x26, 0x28, 0x29, 0x2b, + 0x2c, 0x2f, 0x33, 0x35, 0x38, 0x3a, 0x3c, 0x3e, + 0x3f, 0x3f, 0x41, 0x42, 0x41, 0x3f, 0x3f, 0x3d, + 0x3c, 0x39, 0x36, 0x33, 0x32, 0x2e, 0x2d, 0x2a, + 0x27, 0x26, 0x25, 0x22, 0x1f, 0x1e, 0x1d, 0x1b, + 0x1a, 0x17, 0x17, 0x17, 0x14, 0x14, 0x12, 0x11, + 0x11, 0x12, 0x11, 0x11, 0x10, 0x10, 0x10, 0x10, + 0x10, 0x10, 0x11, 0x11, 0x11, 0x12, 0x13, 0x14, + 0x14, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1c, 0x1e, + 0x1e, 0x1f, 0x22, 0x23, 0x23, 0x24, 0x25, 0x27, + 0x2a, 0x2d, 0x2f, 0x31, 0x35, 0x38, 0x3a, 0x3e, + 0x41, 0x44, 0x48, 0x4b, 0x4d, 0x51, 0x53, 0x55, + 0x57, 0x57, 0x56, 0x55, 0x54, 0x52, 0x52, 0x50, + 0x4e, 0x50, 0x4e, 0x4d, 0x4d, 0x4d, 0x4f, 0x51, + 0x51, 0x52, 0x54, 0x55, 0x55, 0x55, 0x57, 0x55, + 0x54, 0x53, 0x52, 0x4e, 0x4d, 0x4b, 0x4a, 0x49, + 0x46, 0x44, 0x41, 0x3f, 0x3d, 0x3b, 0x3a, 0x3a, + 0x39, 0x39, 0x39, 0x39, 0x3a, 0x3b, 0x3d, 0x3e, + 0x3f, 0x40, 0x41, 0x42, 0x44, 0x44, 0x45, 0x47, + 0x49, 0x49, 0x4a, 0x4d, 0x50, 0x51, 0x53, 0x57, + 0x5a, 0x5b, 0x5e, 0x5f, 0x60, 0x61, 0x62, 0x62, + 0x63, 0x62, 0x60, 0x60, 0x5e, 0x5c, 0x5c, 0x59, + 0x58, 0x56, 0x55, 0x55, 0x55, 0x55, 0x55, 0x54, + 0x56, 0x56, 0x57, 0x58, 0x58, 0x59, 0x5a, 0x59, + 0x58, 0x57, 0x56, 0x55, 0x54, 0x52, 0x53, 0x53, + 0x53, 0x56, 0x57, 0x59, 0x5b, 0x5e, 0x62, 0x66, + 0x6a, 0x6c, 0x6d, 0x6e, 0x6b, 0x69, 0x67, 0x64, + 0x61, 0x5d, 0x58, 0x54, 0x50, 0x4a, 0x47, 0x46, + 0x45, 0x45, 0x44, }, + { 0x1a, 0x21, 0x1e, 0x1f, 0x1f, 0x20, 0x22, 0x23, + 0x25, 0x27, 0x2b, 0x2e, 0x31, 0x34, 0x37, 0x3b, + 0x3d, 0x42, 0x45, 0x49, 0x4a, 0x4d, 0x4e, 0x4e, + 0x51, 0x52, 0x50, 0x4f, 0x4f, 0x4c, 0x49, 0x48, + 0x45, 0x42, 0x3e, 0x3b, 0x39, 0x36, 0x32, 0x32, + 0x2f, 0x2c, 0x2a, 0x28, 0x26, 0x26, 0x25, 0x24, + 0x23, 0x24, 0x24, 0x25, 0x25, 0x28, 0x29, 0x2b, + 0x2d, 0x2f, 0x33, 0x35, 0x38, 0x3a, 0x3c, 0x3e, + 0x3f, 0x3f, 0x41, 0x42, 0x41, 0x3f, 0x3e, 0x3c, + 0x3c, 0x3a, 0x37, 0x33, 0x32, 0x2f, 0x2d, 0x2b, + 0x28, 0x26, 0x25, 0x22, 0x20, 0x1e, 0x1d, 0x1b, + 0x1a, 0x17, 0x17, 0x16, 0x14, 0x14, 0x12, 0x11, + 0x12, 0x11, 0x11, 0x11, 0x11, 0x10, 0x10, 0x10, + 0x10, 0x11, 0x12, 0x12, 0x12, 0x13, 0x14, 0x14, + 0x16, 0x18, 0x19, 0x1a, 0x1b, 0x1d, 0x1e, 0x1f, + 0x21, 0x22, 0x23, 0x25, 0x26, 0x26, 0x28, 
0x2a, + 0x2c, 0x2e, 0x32, 0x34, 0x39, 0x39, 0x3d, 0x41, + 0x45, 0x47, 0x4c, 0x4e, 0x51, 0x54, 0x56, 0x58, + 0x5b, 0x5c, 0x5a, 0x59, 0x58, 0x56, 0x55, 0x53, + 0x53, 0x52, 0x52, 0x51, 0x52, 0x52, 0x53, 0x55, + 0x57, 0x58, 0x5a, 0x5a, 0x59, 0x5b, 0x59, 0x59, + 0x58, 0x57, 0x55, 0x53, 0x51, 0x4e, 0x4c, 0x4a, + 0x48, 0x46, 0x43, 0x40, 0x3e, 0x3c, 0x3b, 0x3b, + 0x38, 0x39, 0x38, 0x39, 0x3a, 0x3d, 0x3d, 0x3e, + 0x3f, 0x40, 0x41, 0x43, 0x44, 0x45, 0x46, 0x48, + 0x4a, 0x4b, 0x4d, 0x4e, 0x50, 0x52, 0x54, 0x56, + 0x59, 0x5c, 0x5e, 0x5f, 0x60, 0x62, 0x62, 0x63, + 0x63, 0x63, 0x61, 0x5f, 0x5e, 0x5d, 0x5c, 0x5b, + 0x59, 0x56, 0x56, 0x55, 0x54, 0x53, 0x53, 0x54, + 0x55, 0x54, 0x55, 0x55, 0x55, 0x57, 0x58, 0x57, + 0x57, 0x56, 0x55, 0x54, 0x54, 0x52, 0x52, 0x53, + 0x54, 0x55, 0x57, 0x58, 0x5b, 0x5e, 0x62, 0x65, + 0x69, 0x6b, 0x6d, 0x6e, 0x6a, 0x69, 0x67, 0x63, + 0x61, 0x5d, 0x58, 0x54, 0x4f, 0x4b, 0x48, 0x47, + 0x46, 0x45, 0x45, }, + { 0x1a, 0x21, 0x1e, 0x1f, 0x1f, 0x20, 0x22, 0x23, + 0x25, 0x27, 0x2b, 0x2d, 0x31, 0x34, 0x37, 0x3b, + 0x3d, 0x42, 0x45, 0x48, 0x4c, 0x4e, 0x4e, 0x4f, + 0x51, 0x52, 0x50, 0x50, 0x4f, 0x4c, 0x4a, 0x48, + 0x45, 0x42, 0x3f, 0x3b, 0x39, 0x36, 0x32, 0x31, + 0x2f, 0x2c, 0x2a, 0x28, 0x26, 0x26, 0x25, 0x24, + 0x23, 0x24, 0x24, 0x25, 0x27, 0x28, 0x29, 0x2b, + 0x2d, 0x30, 0x33, 0x36, 0x39, 0x3b, 0x3d, 0x3f, + 0x3f, 0x40, 0x42, 0x43, 0x42, 0x40, 0x3e, 0x3c, + 0x3c, 0x3a, 0x37, 0x34, 0x32, 0x2f, 0x2d, 0x2c, + 0x2a, 0x27, 0x26, 0x23, 0x20, 0x1e, 0x1d, 0x1c, + 0x1a, 0x18, 0x18, 0x17, 0x15, 0x16, 0x14, 0x12, + 0x12, 0x12, 0x12, 0x12, 0x12, 0x11, 0x11, 0x12, + 0x12, 0x12, 0x13, 0x14, 0x14, 0x14, 0x15, 0x16, + 0x17, 0x19, 0x1b, 0x1c, 0x1e, 0x20, 0x20, 0x22, + 0x24, 0x25, 0x26, 0x27, 0x28, 0x2a, 0x2c, 0x2c, + 0x2f, 0x32, 0x35, 0x37, 0x3b, 0x3c, 0x41, 0x45, + 0x48, 0x4c, 0x50, 0x52, 0x54, 0x57, 0x5a, 0x5c, + 0x5f, 0x5f, 0x5f, 0x5d, 0x5c, 0x5b, 0x5a, 0x58, + 0x57, 0x57, 0x57, 0x56, 0x56, 0x57, 0x57, 0x5a, + 0x5c, 0x5e, 0x5f, 0x61, 0x5f, 0x5f, 0x5f, 0x5e, + 0x5d, 0x5c, 0x5a, 0x57, 0x55, 0x52, 0x4f, 0x4e, + 0x4a, 0x47, 0x46, 0x42, 0x41, 0x3e, 0x3d, 0x3c, + 0x3b, 0x3a, 0x39, 0x39, 0x3b, 0x3c, 0x3d, 0x3f, + 0x40, 0x42, 0x42, 0x44, 0x45, 0x46, 0x49, 0x49, + 0x4b, 0x4c, 0x4e, 0x4f, 0x51, 0x54, 0x57, 0x58, + 0x5b, 0x5d, 0x61, 0x61, 0x61, 0x63, 0x65, 0x65, + 0x64, 0x64, 0x62, 0x61, 0x60, 0x5e, 0x5d, 0x5c, + 0x59, 0x58, 0x56, 0x54, 0x53, 0x53, 0x53, 0x54, + 0x54, 0x53, 0x53, 0x54, 0x54, 0x54, 0x55, 0x55, + 0x56, 0x55, 0x54, 0x53, 0x53, 0x52, 0x52, 0x53, + 0x55, 0x56, 0x57, 0x58, 0x5b, 0x5e, 0x62, 0x66, + 0x69, 0x6b, 0x6d, 0x6d, 0x6b, 0x69, 0x67, 0x64, + 0x61, 0x5d, 0x58, 0x55, 0x50, 0x4b, 0x48, 0x47, + 0x46, 0x46, 0x46, }, + { 0x1a, 0x20, 0x1e, 0x1f, 0x1f, 0x21, 0x22, 0x23, + 0x25, 0x27, 0x2b, 0x2d, 0x31, 0x34, 0x37, 0x3b, + 0x3d, 0x42, 0x45, 0x48, 0x4c, 0x4e, 0x4f, 0x4f, + 0x51, 0x52, 0x51, 0x50, 0x4e, 0x4b, 0x4a, 0x48, + 0x45, 0x42, 0x3f, 0x3b, 0x38, 0x36, 0x32, 0x31, + 0x2f, 0x2c, 0x2a, 0x28, 0x26, 0x26, 0x25, 0x24, + 0x23, 0x24, 0x24, 0x25, 0x27, 0x28, 0x29, 0x2b, + 0x2e, 0x30, 0x33, 0x36, 0x39, 0x3b, 0x3d, 0x3f, + 0x3f, 0x40, 0x41, 0x42, 0x41, 0x40, 0x3e, 0x3c, + 0x3c, 0x3a, 0x37, 0x34, 0x33, 0x30, 0x2e, 0x2b, + 0x29, 0x26, 0x24, 0x24, 0x20, 0x1f, 0x1d, 0x1d, + 0x1a, 0x19, 0x17, 0x16, 0x16, 0x16, 0x16, 0x14, + 0x13, 0x12, 0x13, 0x13, 0x13, 0x12, 0x12, 0x13, + 0x13, 0x14, 0x15, 0x15, 0x14, 0x15, 0x16, 0x18, + 0x19, 0x1b, 0x1c, 0x1e, 0x20, 0x21, 0x22, 0x24, + 0x27, 0x28, 0x29, 0x2a, 0x2c, 0x2c, 0x2d, 0x2f, + 0x32, 0x35, 0x37, 0x3a, 0x3c, 0x3e, 0x44, 0x48, + 0x4c, 0x50, 0x54, 0x56, 0x58, 0x5b, 0x5e, 
0x60, + 0x61, 0x63, 0x62, 0x61, 0x60, 0x5f, 0x5e, 0x5e, + 0x5c, 0x5c, 0x5b, 0x5a, 0x5a, 0x5b, 0x5c, 0x5e, + 0x60, 0x63, 0x64, 0x65, 0x63, 0x62, 0x63, 0x63, + 0x61, 0x60, 0x5e, 0x5b, 0x58, 0x55, 0x51, 0x4f, + 0x4c, 0x4a, 0x47, 0x44, 0x42, 0x41, 0x3e, 0x3c, + 0x3b, 0x3a, 0x3a, 0x3b, 0x3b, 0x3c, 0x3e, 0x3f, + 0x40, 0x42, 0x43, 0x45, 0x46, 0x47, 0x49, 0x4a, + 0x4c, 0x4c, 0x4f, 0x51, 0x52, 0x55, 0x58, 0x5b, + 0x5c, 0x5f, 0x61, 0x62, 0x63, 0x64, 0x64, 0x65, + 0x66, 0x65, 0x63, 0x62, 0x5f, 0x5e, 0x5e, 0x5c, + 0x5b, 0x58, 0x56, 0x55, 0x54, 0x53, 0x52, 0x53, + 0x52, 0x52, 0x52, 0x52, 0x52, 0x53, 0x55, 0x55, + 0x55, 0x53, 0x53, 0x53, 0x52, 0x51, 0x52, 0x52, + 0x55, 0x55, 0x58, 0x58, 0x5b, 0x5d, 0x61, 0x65, + 0x68, 0x6a, 0x6c, 0x6b, 0x69, 0x68, 0x67, 0x64, + 0x61, 0x5e, 0x58, 0x54, 0x4f, 0x4b, 0x49, 0x48, + 0x47, 0x46, 0x45, }, + { 0x19, 0x20, 0x1d, 0x1f, 0x1f, 0x20, 0x23, 0x23, + 0x25, 0x27, 0x2b, 0x2d, 0x31, 0x34, 0x37, 0x3b, + 0x3d, 0x42, 0x45, 0x48, 0x4c, 0x4e, 0x4f, 0x4f, + 0x51, 0x52, 0x51, 0x50, 0x4e, 0x4b, 0x4a, 0x48, + 0x44, 0x42, 0x3f, 0x3a, 0x38, 0x36, 0x32, 0x30, + 0x2f, 0x2c, 0x2a, 0x28, 0x26, 0x26, 0x25, 0x24, + 0x23, 0x24, 0x24, 0x25, 0x26, 0x28, 0x29, 0x2b, + 0x2e, 0x30, 0x34, 0x36, 0x39, 0x3b, 0x3d, 0x3f, + 0x3f, 0x40, 0x41, 0x42, 0x41, 0x40, 0x3e, 0x3c, + 0x3c, 0x3a, 0x37, 0x34, 0x33, 0x30, 0x2e, 0x2b, + 0x29, 0x27, 0x25, 0x24, 0x21, 0x1f, 0x1e, 0x1c, + 0x1b, 0x19, 0x17, 0x16, 0x16, 0x16, 0x16, 0x14, + 0x13, 0x12, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, + 0x13, 0x14, 0x15, 0x14, 0x14, 0x14, 0x17, 0x19, + 0x1a, 0x1c, 0x1e, 0x20, 0x21, 0x23, 0x24, 0x26, + 0x29, 0x29, 0x2b, 0x2c, 0x2d, 0x2e, 0x30, 0x31, + 0x34, 0x38, 0x3b, 0x3c, 0x3f, 0x42, 0x47, 0x4c, + 0x50, 0x54, 0x57, 0x5b, 0x5c, 0x5e, 0x62, 0x63, + 0x66, 0x66, 0x66, 0x65, 0x64, 0x63, 0x61, 0x62, + 0x60, 0x60, 0x5f, 0x5e, 0x5e, 0x5f, 0x60, 0x62, + 0x65, 0x67, 0x69, 0x6a, 0x69, 0x68, 0x69, 0x67, + 0x66, 0x64, 0x62, 0x5f, 0x5c, 0x58, 0x54, 0x51, + 0x4e, 0x4b, 0x49, 0x45, 0x43, 0x41, 0x40, 0x3e, + 0x3c, 0x3a, 0x3b, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, + 0x41, 0x42, 0x44, 0x46, 0x46, 0x48, 0x49, 0x4b, + 0x4d, 0x50, 0x51, 0x53, 0x55, 0x57, 0x58, 0x5c, + 0x5f, 0x60, 0x63, 0x64, 0x64, 0x65, 0x66, 0x66, + 0x66, 0x65, 0x65, 0x63, 0x61, 0x5f, 0x5e, 0x5c, + 0x5a, 0x58, 0x56, 0x55, 0x54, 0x53, 0x52, 0x52, + 0x53, 0x52, 0x52, 0x52, 0x52, 0x53, 0x53, 0x53, + 0x54, 0x53, 0x53, 0x52, 0x53, 0x51, 0x53, 0x53, + 0x55, 0x57, 0x58, 0x59, 0x5b, 0x5d, 0x62, 0x64, + 0x68, 0x6a, 0x6c, 0x6b, 0x69, 0x68, 0x67, 0x64, + 0x61, 0x5d, 0x57, 0x54, 0x50, 0x4a, 0x48, 0x47, + 0x46, 0x45, 0x45, }, diff --git a/tests/tcg/hexagon/hvx_histogram_row.S b/tests/tcg/hexagon/hvx_histogram_row.S new file mode 100644 index 0000000000..5e42c33145 --- /dev/null +++ b/tests/tcg/hexagon/hvx_histogram_row.S @@ -0,0 +1,294 @@ +/* + * Copyright(c) 2021 Qualcomm Innovation Center, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . 
+ */ + + +/* + * void hvx_histogram_row(uint8_t *src, => r0 + * int stride, => r1 + * int width, => r2 + * int height, => r3 + * int *hist => r4) + */ + .text + .p2align 2 + .global hvx_histogram_row + .type hvx_histogram_row, @function +hvx_histogram_row: + { r2 = lsr(r2, #7) /* size / VLEN */ + r5 = and(r2, #127) /* size % VLEN */ + v1 = #0 + v0 = #0 + } + /* + * Step 1: Clean the whole vector register file + */ + { v3:2 = v1:0 + v5:4 = v1:0 + p0 = cmp.gt(r2, #0) /* P0 = (width / VLEN > 0) */ + p1 = cmp.eq(r5, #0) /* P1 = (width % VLEN == 0) */ + } + { q0 = vsetq(r5) + v7:6 = v1:0 + } + { v9:8 = v1:0 + v11:10 = v1:0 + } + { v13:12 = v1:0 + v15:14 = v1:0 + } + { v17:16 = v1:0 + v19:18 = v1:0 + } + { v21:20 = v1:0 + v23:22 = v1:0 + } + { v25:24 = v1:0 + v27:26 = v1:0 + } + { v29:28 = v1:0 + v31:30 = v1:0 + r10 = add(r0, r1) /* R10 = &src[2 * stride] */ + loop1(.outerloop, r3) + } + + /* + * Step 2: vhist + */ + .falign +.outerloop: + { if (!p0) jump .loopend + loop0(.innerloop, r2) + } + + .falign +.innerloop: + { v12.tmp = vmem(R0++#1) + vhist + }:endloop0 + + .falign +.loopend: + if (p1) jump .skip /* if (width % VLEN == 0) done with current row */ + { v13.tmp = vmem(r0 + #0) + vhist(q0) + } + + .falign +.skip: + { r0 = r10 /* R0 = &src[(i + 1) * stride] */ + r10 = add(r10, r1) /* R10 = &src[(i + 2) * stride] */ + }:endloop1 + + + /* + * Step 3: Sum up the data + */ + { v0.h = vshuff(v0.h) + r10 = ##0x00010001 + } + v1.h = vshuff(v1.h) + { V2.h = vshuff(v2.h) + v0.w = vdmpy(v0.h, r10.h):sat + } + { v3.h = vshuff(v3.h) + v1.w = vdmpy(v1.h, r10.h):sat + } + { v4.h = vshuff(V4.h) + v2.w = vdmpy(v2.h, r10.h):sat + } + { v5.h = vshuff(v5.h) + v3.w = vdmpy(v3.h, r10.h):sat + } + { v6.h = vshuff(v6.h) + v4.w = vdmpy(v4.h, r10.h):sat + } + { v7.h = vshuff(v7.h) + v5.w = vdmpy(v5.h, r10.h):sat + } + { v8.h = vshuff(V8.h) + v6.w = vdmpy(v6.h, r10.h):sat + } + { v9.h = vshuff(V9.h) + v7.w = vdmpy(v7.h, r10.h):sat + } + { v10.h = vshuff(v10.h) + v8.w = vdmpy(v8.h, r10.h):sat + } + { v11.h = vshuff(v11.h) + v9.w = vdmpy(v9.h, r10.h):sat + } + { v12.h = vshuff(v12.h) + v10.w = vdmpy(v10.h, r10.h):sat + } + { v13.h = vshuff(V13.h) + v11.w = vdmpy(v11.h, r10.h):sat + } + { v14.h = vshuff(v14.h) + v12.w = vdmpy(v12.h, r10.h):sat + } + { v15.h = vshuff(v15.h) + v13.w = vdmpy(v13.h, r10.h):sat + } + { v16.h = vshuff(v16.h) + v14.w = vdmpy(v14.h, r10.h):sat + } + { v17.h = vshuff(v17.h) + v15.w = vdmpy(v15.h, r10.h):sat + } + { v18.h = vshuff(v18.h) + v16.w = vdmpy(v16.h, r10.h):sat + } + { v19.h = vshuff(v19.h) + v17.w = vdmpy(v17.h, r10.h):sat + } + { v20.h = vshuff(v20.h) + v18.W = vdmpy(v18.h, r10.h):sat + } + { v21.h = vshuff(v21.h) + v19.w = vdmpy(v19.h, r10.h):sat + } + { v22.h = vshuff(v22.h) + v20.w = vdmpy(v20.h, r10.h):sat + } + { v23.h = vshuff(v23.h) + v21.w = vdmpy(v21.h, r10.h):sat + } + { v24.h = vshuff(v24.h) + v22.w = vdmpy(v22.h, r10.h):sat + } + { v25.h = vshuff(v25.h) + v23.w = vdmpy(v23.h, r10.h):sat + } + { v26.h = vshuff(v26.h) + v24.w = vdmpy(v24.h, r10.h):sat + } + { v27.h = vshuff(V27.h) + v25.w = vdmpy(v25.h, r10.h):sat + } + { v28.h = vshuff(v28.h) + v26.w = vdmpy(v26.h, r10.h):sat + } + { v29.h = vshuff(v29.h) + v27.w = vdmpy(v27.h, r10.h):sat + } + { v30.h = vshuff(v30.h) + v28.w = vdmpy(v28.h, r10.h):sat + } + { v31.h = vshuff(v31.h) + v29.w = vdmpy(v29.h, r10.h):sat + r28 = #32 + } + { vshuff(v1, v0, r28) + v30.w = vdmpy(v30.h, r10.h):sat + } + { vshuff(v3, v2, r28) + v31.w = vdmpy(v31.h, r10.h):sat + } + { vshuff(v5, v4, r28) + v0.w = vadd(v1.w, v0.w) + v2.w = vadd(v3.w, 
v2.w) + } + { vshuff(v7, v6, r28) + r7 = #64 + } + { vshuff(v9, v8, r28) + v4.w = vadd(v5.w, v4.w) + v6.w = vadd(v7.w, v6.w) + } + vshuff(v11, v10, r28) + { vshuff(v13, v12, r28) + v8.w = vadd(v9.w, v8.w) + v10.w = vadd(v11.w, v10.w) + } + vshuff(v15, v14, r28) + { vshuff(v17, v16, r28) + v12.w = vadd(v13.w, v12.w) + v14.w = vadd(v15.w, v14.w) + } + vshuff(v19, v18, r28) + { vshuff(v21, v20, r28) + v16.w = vadd(v17.w, v16.w) + v18.w = vadd(v19.w, v18.w) + } + vshuff(v23, v22, r28) + { vshuff(v25, v24, r28) + v20.w = vadd(v21.w, v20.w) + v22.w = vadd(v23.w, v22.w) + } + vshuff(v27, v26, r28) + { vshuff(v29, v28, r28) + v24.w = vadd(v25.w, v24.w) + v26.w = vadd(v27.w, v26.w) + } + vshuff(v31, v30, r28) + { v28.w = vadd(v29.w, v28.w) + vshuff(v2, v0, r7) + } + { v30.w = vadd(v31.w, v30.w) + vshuff(v6, v4, r7) + v0.w = vadd(v0.w, v2.w) + } + { vshuff(v10, v8, r7) + v1.tmp = vmem(r4 + #0) /* update hist[0-31] */ + v0.w = vadd(v0.w, v1.w) + vmem(r4++#1) = v0.new + } + { vshuff(v14, v12, r7) + v4.w = vadd(v4.w, v6.w) + v8.w = vadd(v8.w, v10.w) + } + { vshuff(v18, v16, r7) + v1.tmp = vmem(r4 + #0) /* update hist[32-63] */ + v4.w = vadd(v4.w, v1.w) + vmem(r4++#1) = v4.new + } + { vshuff(v22, v20, r7) + v12.w = vadd(v12.w, v14.w) + V16.w = vadd(v16.w, v18.w) + } + { vshuff(v26, v24, r7) + v1.tmp = vmem(r4 + #0) /* update hist[64-95] */ + v8.w = vadd(v8.w, v1.w) + vmem(r4++#1) = v8.new + } + { vshuff(v30, v28, r7) + v1.tmp = vmem(r4 + #0) /* update hist[96-127] */ + v12.w = vadd(v12.w, v1.w) + vmem(r4++#1) = v12.new + } + + { v20.w = vadd(v20.w, v22.w) + v1.tmp = vmem(r4 + #0) /* update hist[128-159] */ + v16.w = vadd(v16.w, v1.w) + vmem(r4++#1) = v16.new + } + { v24.w = vadd(v24.w, v26.w) + v1.tmp = vmem(r4 + #0) /* update hist[160-191] */ + v20.w = vadd(v20.w, v1.w) + vmem(r4++#1) = v20.new + } + { v28.w = vadd(v28.w, v30.w) + v1.tmp = vmem(r4 + #0) /* update hist[192-223] */ + v24.w = vadd(v24.w, v1.w) + vmem(r4++#1) = v24.new + } + { v1.tmp = vmem(r4 + #0) /* update hist[224-255] */ + v28.w = vadd(v28.w, v1.w) + vmem(r4++#1) = v28.new + } + jumpr r31 + .size hvx_histogram_row, .-hvx_histogram_row diff --git a/tests/tcg/hexagon/hvx_histogram_row.h b/tests/tcg/hexagon/hvx_histogram_row.h new file mode 100644 index 0000000000..6a4531a92d --- /dev/null +++ b/tests/tcg/hexagon/hvx_histogram_row.h @@ -0,0 +1,24 @@ +/* + * Copyright(c) 2021 Qualcomm Innovation Center, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ + +#ifndef HVX_HISTOGRAM_ROW_H +#define HVX_HISTOGRAM_ROW_H + +void hvx_histogram_row(uint8_t *src, int stride, int width, int height, + int *hist); + +#endif diff --git a/tests/tcg/hexagon/hvx_misc.c b/tests/tcg/hexagon/hvx_misc.c new file mode 100644 index 0000000000..312bb98b41 --- /dev/null +++ b/tests/tcg/hexagon/hvx_misc.c @@ -0,0 +1,469 @@ +/* + * Copyright(c) 2021 Qualcomm Innovation Center, Inc. All Rights Reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ + +#include +#include +#include +#include + +int err; + +static void __check(int line, int i, int j, uint64_t result, uint64_t expect) +{ + if (result != expect) { + printf("ERROR at line %d: [%d][%d] 0x%016llx != 0x%016llx\n", + line, i, j, result, expect); + err++; + } +} + +#define check(RES, EXP) __check(__LINE__, RES, EXP) + +#define MAX_VEC_SIZE_BYTES 128 + +typedef union { + uint64_t ud[MAX_VEC_SIZE_BYTES / 8]; + int64_t d[MAX_VEC_SIZE_BYTES / 8]; + uint32_t uw[MAX_VEC_SIZE_BYTES / 4]; + int32_t w[MAX_VEC_SIZE_BYTES / 4]; + uint16_t uh[MAX_VEC_SIZE_BYTES / 2]; + int16_t h[MAX_VEC_SIZE_BYTES / 2]; + uint8_t ub[MAX_VEC_SIZE_BYTES / 1]; + int8_t b[MAX_VEC_SIZE_BYTES / 1]; +} MMVector; + +#define BUFSIZE 16 +#define OUTSIZE 16 +#define MASKMOD 3 + +MMVector buffer0[BUFSIZE] __attribute__((aligned(MAX_VEC_SIZE_BYTES))); +MMVector buffer1[BUFSIZE] __attribute__((aligned(MAX_VEC_SIZE_BYTES))); +MMVector mask[BUFSIZE] __attribute__((aligned(MAX_VEC_SIZE_BYTES))); +MMVector output[OUTSIZE] __attribute__((aligned(MAX_VEC_SIZE_BYTES))); +MMVector expect[OUTSIZE] __attribute__((aligned(MAX_VEC_SIZE_BYTES))); + +#define CHECK_OUTPUT_FUNC(FIELD, FIELDSZ) \ +static void check_output_##FIELD(int line, size_t num_vectors) \ +{ \ + for (int i = 0; i < num_vectors; i++) { \ + for (int j = 0; j < MAX_VEC_SIZE_BYTES / FIELDSZ; j++) { \ + __check(line, i, j, output[i].FIELD[j], expect[i].FIELD[j]); \ + } \ + } \ +} + +CHECK_OUTPUT_FUNC(d, 8) +CHECK_OUTPUT_FUNC(w, 4) +CHECK_OUTPUT_FUNC(h, 2) +CHECK_OUTPUT_FUNC(b, 1) + +static void init_buffers(void) +{ + int counter0 = 0; + int counter1 = 17; + for (int i = 0; i < BUFSIZE; i++) { + for (int j = 0; j < MAX_VEC_SIZE_BYTES; j++) { + buffer0[i].b[j] = counter0++; + buffer1[i].b[j] = counter1++; + } + for (int j = 0; j < MAX_VEC_SIZE_BYTES / 4; j++) { + mask[i].w[j] = (i + j % MASKMOD == 0) ? 
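+            /*
+             * Lanes where i + (j % MASKMOD) == 0 get a zero word and every
+             * other lane gets 1; test_masked_store() below compares the mask
+             * words against zero to build its store predicate, so exactly
+             * these lanes (or their complement in the inverted case) are the
+             * ones written to the output buffer.
+             */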
0 : 1; + } + } +} + +static void test_load_tmp(void) +{ + void *p0 = buffer0; + void *p1 = buffer1; + void *pout = output; + + for (int i = 0; i < BUFSIZE; i++) { + /* + * Load into v12 as .tmp, then use it in the next packet + * Should get the new value within the same packet and + * the old value in the next packet + */ + asm("v3 = vmem(%0 + #0)\n\t" + "r1 = #1\n\t" + "v12 = vsplat(r1)\n\t" + "{\n\t" + " v12.tmp = vmem(%1 + #0)\n\t" + " v4.w = vadd(v12.w, v3.w)\n\t" + "}\n\t" + "v4.w = vadd(v4.w, v12.w)\n\t" + "vmem(%2 + #0) = v4\n\t" + : : "r"(p0), "r"(p1), "r"(pout) + : "r1", "v12", "v3", "v4", "v6", "memory"); + p0 += sizeof(MMVector); + p1 += sizeof(MMVector); + pout += sizeof(MMVector); + + for (int j = 0; j < MAX_VEC_SIZE_BYTES / 4; j++) { + expect[i].w[j] = buffer0[i].w[j] + buffer1[i].w[j] + 1; + } + } + + check_output_w(__LINE__, BUFSIZE); +} + +static void test_load_cur(void) +{ + void *p0 = buffer0; + void *pout = output; + + for (int i = 0; i < BUFSIZE; i++) { + asm("{\n\t" + " v2.cur = vmem(%0 + #0)\n\t" + " vmem(%1 + #0) = v2\n\t" + "}\n\t" + : : "r"(p0), "r"(pout) : "v2", "memory"); + p0 += sizeof(MMVector); + pout += sizeof(MMVector); + + for (int j = 0; j < MAX_VEC_SIZE_BYTES / 4; j++) { + expect[i].uw[j] = buffer0[i].uw[j]; + } + } + + check_output_w(__LINE__, BUFSIZE); +} + +static void test_load_aligned(void) +{ + /* Aligned loads ignore the low bits of the address */ + void *p0 = buffer0; + void *pout = output; + const size_t offset = 13; + + p0 += offset; /* Create an unaligned address */ + asm("v2 = vmem(%0 + #0)\n\t" + "vmem(%1 + #0) = v2\n\t" + : : "r"(p0), "r"(pout) : "v2", "memory"); + + expect[0] = buffer0[0]; + + check_output_w(__LINE__, 1); +} + +static void test_load_unaligned(void) +{ + void *p0 = buffer0; + void *pout = output; + const size_t offset = 12; + + p0 += offset; /* Create an unaligned address */ + asm("v2 = vmemu(%0 + #0)\n\t" + "vmem(%1 + #0) = v2\n\t" + : : "r"(p0), "r"(pout) : "v2", "memory"); + + memcpy(expect, &buffer0[0].ub[offset], sizeof(MMVector)); + + check_output_w(__LINE__, 1); +} + +static void test_store_aligned(void) +{ + /* Aligned stores ignore the low bits of the address */ + void *p0 = buffer0; + void *pout = output; + const size_t offset = 13; + + pout += offset; /* Create an unaligned address */ + asm("v2 = vmem(%0 + #0)\n\t" + "vmem(%1 + #0) = v2\n\t" + : : "r"(p0), "r"(pout) : "v2", "memory"); + + expect[0] = buffer0[0]; + + check_output_w(__LINE__, 1); +} + +static void test_store_unaligned(void) +{ + void *p0 = buffer0; + void *pout = output; + const size_t offset = 12; + + pout += offset; /* Create an unaligned address */ + asm("v2 = vmem(%0 + #0)\n\t" + "vmemu(%1 + #0) = v2\n\t" + : : "r"(p0), "r"(pout) : "v2", "memory"); + + memcpy(expect, buffer0, 2 * sizeof(MMVector)); + memcpy(&expect[0].ub[offset], buffer0, sizeof(MMVector)); + + check_output_w(__LINE__, 2); +} + +static void test_masked_store(bool invert) +{ + void *p0 = buffer0; + void *pmask = mask; + void *pout = output; + + memset(expect, 0xff, sizeof(expect)); + memset(output, 0xff, sizeof(expect)); + + for (int i = 0; i < BUFSIZE; i++) { + if (invert) { + asm("r4 = #0\n\t" + "v4 = vsplat(r4)\n\t" + "v5 = vmem(%0 + #0)\n\t" + "q0 = vcmp.eq(v4.w, v5.w)\n\t" + "v5 = vmem(%1)\n\t" + "if (!q0) vmem(%2) = v5\n\t" /* Inverted test */ + : : "r"(pmask), "r"(p0), "r"(pout) + : "r4", "v4", "v5", "q0", "memory"); + } else { + asm("r4 = #0\n\t" + "v4 = vsplat(r4)\n\t" + "v5 = vmem(%0 + #0)\n\t" + "q0 = vcmp.eq(v4.w, v5.w)\n\t" + "v5 = vmem(%1)\n\t" + "if (q0) vmem(%2) 
= v5\n\t" /* Non-inverted test */ + : : "r"(pmask), "r"(p0), "r"(pout) + : "r4", "v4", "v5", "q0", "memory"); + } + p0 += sizeof(MMVector); + pmask += sizeof(MMVector); + pout += sizeof(MMVector); + + for (int j = 0; j < MAX_VEC_SIZE_BYTES / 4; j++) { + if (invert) { + if (i + j % MASKMOD != 0) { + expect[i].w[j] = buffer0[i].w[j]; + } + } else { + if (i + j % MASKMOD == 0) { + expect[i].w[j] = buffer0[i].w[j]; + } + } + } + } + + check_output_w(__LINE__, BUFSIZE); +} + +static void test_new_value_store(void) +{ + void *p0 = buffer0; + void *pout = output; + + asm("{\n\t" + " v2 = vmem(%0 + #0)\n\t" + " vmem(%1 + #0) = v2.new\n\t" + "}\n\t" + : : "r"(p0), "r"(pout) : "v2", "memory"); + + expect[0] = buffer0[0]; + + check_output_w(__LINE__, 1); +} + +static void test_max_temps() +{ + void *p0 = buffer0; + void *pout = output; + + asm("v0 = vmem(%0 + #0)\n\t" + "v1 = vmem(%0 + #1)\n\t" + "v2 = vmem(%0 + #2)\n\t" + "v3 = vmem(%0 + #3)\n\t" + "v4 = vmem(%0 + #4)\n\t" + "{\n\t" + " v1:0.w = vadd(v3:2.w, v1:0.w)\n\t" + " v2.b = vshuffe(v3.b, v2.b)\n\t" + " v3.w = vadd(v1.w, v4.w)\n\t" + " v4.tmp = vmem(%0 + #5)\n\t" + "}\n\t" + "vmem(%1 + #0) = v0\n\t" + "vmem(%1 + #1) = v1\n\t" + "vmem(%1 + #2) = v2\n\t" + "vmem(%1 + #3) = v3\n\t" + "vmem(%1 + #4) = v4\n\t" + : : "r"(p0), "r"(pout) : "memory"); + + /* The first two vectors come from the vadd-pair instruction */ + for (int i = 0; i < MAX_VEC_SIZE_BYTES / 4; i++) { + expect[0].w[i] = buffer0[0].w[i] + buffer0[2].w[i]; + expect[1].w[i] = buffer0[1].w[i] + buffer0[3].w[i]; + } + /* The third vector comes from the vshuffe instruction */ + for (int i = 0; i < MAX_VEC_SIZE_BYTES / 2; i++) { + expect[2].uh[i] = (buffer0[2].uh[i] & 0xff) | + (buffer0[3].uh[i] & 0xff) << 8; + } + /* The fourth vector comes from the vadd-single instruction */ + for (int i = 0; i < MAX_VEC_SIZE_BYTES / 4; i++) { + expect[3].w[i] = buffer0[1].w[i] + buffer0[5].w[i]; + } + /* + * The fifth vector comes from the load to v4 + * make sure the .tmp is dropped + */ + expect[4] = buffer0[4]; + + check_output_b(__LINE__, 5); +} + +#define VEC_OP1(ASM, EL, IN, OUT) \ + asm("v2 = vmem(%0 + #0)\n\t" \ + "v2" #EL " = " #ASM "(v2" #EL ")\n\t" \ + "vmem(%1 + #0) = v2\n\t" \ + : : "r"(IN), "r"(OUT) : "v2", "memory") + +#define VEC_OP2(ASM, EL, IN0, IN1, OUT) \ + asm("v2 = vmem(%0 + #0)\n\t" \ + "v3 = vmem(%1 + #0)\n\t" \ + "v2" #EL " = " #ASM "(v2" #EL ", v3" #EL ")\n\t" \ + "vmem(%2 + #0) = v2\n\t" \ + : : "r"(IN0), "r"(IN1), "r"(OUT) : "v2", "v3", "memory") + +#define TEST_VEC_OP1(NAME, ASM, EL, FIELD, FIELDSZ, OP) \ +static void test_##NAME(void) \ +{ \ + void *pin = buffer0; \ + void *pout = output; \ + for (int i = 0; i < BUFSIZE; i++) { \ + VEC_OP1(ASM, EL, pin, pout); \ + pin += sizeof(MMVector); \ + pout += sizeof(MMVector); \ + } \ + for (int i = 0; i < BUFSIZE; i++) { \ + for (int j = 0; j < MAX_VEC_SIZE_BYTES / FIELDSZ; j++) { \ + expect[i].FIELD[j] = OP buffer0[i].FIELD[j]; \ + } \ + } \ + check_output_##FIELD(__LINE__, BUFSIZE); \ +} + +#define TEST_VEC_OP2(NAME, ASM, EL, FIELD, FIELDSZ, OP) \ +static void test_##NAME(void) \ +{ \ + void *p0 = buffer0; \ + void *p1 = buffer1; \ + void *pout = output; \ + for (int i = 0; i < BUFSIZE; i++) { \ + VEC_OP2(ASM, EL, p0, p1, pout); \ + p0 += sizeof(MMVector); \ + p1 += sizeof(MMVector); \ + pout += sizeof(MMVector); \ + } \ + for (int i = 0; i < BUFSIZE; i++) { \ + for (int j = 0; j < MAX_VEC_SIZE_BYTES / FIELDSZ; j++) { \ + expect[i].FIELD[j] = buffer0[i].FIELD[j] OP buffer1[i].FIELD[j]; \ + } \ + } \ + 
check_output_##FIELD(__LINE__, BUFSIZE); \ +} + +#define THRESHOLD 31 + +#define PRED_OP2(ASM, IN0, IN1, OUT, INV) \ + asm("r4 = #%3\n\t" \ + "v1.b = vsplat(r4)\n\t" \ + "v2 = vmem(%0 + #0)\n\t" \ + "q0 = vcmp.gt(v2.b, v1.b)\n\t" \ + "v3 = vmem(%1 + #0)\n\t" \ + "q1 = vcmp.gt(v3.b, v1.b)\n\t" \ + "q2 = " #ASM "(q0, " INV "q1)\n\t" \ + "r4 = #0xff\n\t" \ + "v1.b = vsplat(r4)\n\t" \ + "if (q2) vmem(%2 + #0) = v1\n\t" \ + : : "r"(IN0), "r"(IN1), "r"(OUT), "i"(THRESHOLD) \ + : "r4", "v1", "v2", "v3", "q0", "q1", "q2", "memory") + +#define TEST_PRED_OP2(NAME, ASM, OP, INV) \ +static void test_##NAME(bool invert) \ +{ \ + void *p0 = buffer0; \ + void *p1 = buffer1; \ + void *pout = output; \ + memset(output, 0, sizeof(expect)); \ + for (int i = 0; i < BUFSIZE; i++) { \ + PRED_OP2(ASM, p0, p1, pout, INV); \ + p0 += sizeof(MMVector); \ + p1 += sizeof(MMVector); \ + pout += sizeof(MMVector); \ + } \ + for (int i = 0; i < BUFSIZE; i++) { \ + for (int j = 0; j < MAX_VEC_SIZE_BYTES; j++) { \ + bool p0 = (buffer0[i].b[j] > THRESHOLD); \ + bool p1 = (buffer1[i].b[j] > THRESHOLD); \ + if (invert) { \ + expect[i].b[j] = (p0 OP !p1) ? 0xff : 0x00; \ + } else { \ + expect[i].b[j] = (p0 OP p1) ? 0xff : 0x00; \ + } \ + } \ + } \ + check_output_b(__LINE__, BUFSIZE); \ +} + +TEST_VEC_OP2(vadd_w, vadd, .w, w, 4, +) +TEST_VEC_OP2(vadd_h, vadd, .h, h, 2, +) +TEST_VEC_OP2(vadd_b, vadd, .b, b, 1, +) +TEST_VEC_OP2(vsub_w, vsub, .w, w, 4, -) +TEST_VEC_OP2(vsub_h, vsub, .h, h, 2, -) +TEST_VEC_OP2(vsub_b, vsub, .b, b, 1, -) +TEST_VEC_OP2(vxor, vxor, , d, 8, ^) +TEST_VEC_OP2(vand, vand, , d, 8, &) +TEST_VEC_OP2(vor, vor, , d, 8, |) +TEST_VEC_OP1(vnot, vnot, , d, 8, ~) + +TEST_PRED_OP2(pred_or, or, |, "") +TEST_PRED_OP2(pred_or_n, or, |, "!") +TEST_PRED_OP2(pred_and, and, &, "") +TEST_PRED_OP2(pred_and_n, and, &, "!") +TEST_PRED_OP2(pred_xor, xor, ^, "") + +int main() +{ + init_buffers(); + + test_load_tmp(); + test_load_cur(); + test_load_aligned(); + test_load_unaligned(); + test_store_aligned(); + test_store_unaligned(); + test_masked_store(false); + test_masked_store(true); + test_new_value_store(); + test_max_temps(); + + test_vadd_w(); + test_vadd_h(); + test_vadd_b(); + test_vsub_w(); + test_vsub_h(); + test_vsub_b(); + test_vxor(); + test_vand(); + test_vor(); + test_vnot(); + + test_pred_or(false); + test_pred_or_n(true); + test_pred_and(false); + test_pred_and_n(true); + test_pred_xor(false); + + puts(err ? "FAIL" : "PASS"); + return err ? 1 : 0; +} diff --git a/tests/tcg/hexagon/overflow.c b/tests/tcg/hexagon/overflow.c new file mode 100644 index 0000000000..196fcf7f3a --- /dev/null +++ b/tests/tcg/hexagon/overflow.c @@ -0,0 +1,107 @@ +/* + * Copyright(c) 2021 Qualcomm Innovation Center, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . 
+ */ + +#include +#include +#include +#include +#include +#include +#include + + +int err; + +static void __check(const char *filename, int line, int x, int expect) +{ + if (x != expect) { + printf("ERROR %s:%d - %d != %d\n", + filename, line, x, expect); + err++; + } +} + +#define check(x, expect) __check(__FILE__, __LINE__, (x), (expect)) + +static int satub(int src, int *p, int *ovf_result) +{ + int result; + int usr; + + /* + * This instruction can set bit 0 (OVF/overflow) in usr + * Clear the bit first, then return that bit to the caller + * + * We also store the src into *p in the same packet, so we + * can ensure the overflow doesn't get set when an exception + * is generated. + */ + asm volatile("r2 = usr\n\t" + "r2 = clrbit(r2, #0)\n\t" /* clear overflow bit */ + "usr = r2\n\t" + "{\n\t" + " %0 = satub(%2)\n\t" + " memw(%3) = %2\n\t" + "}\n\t" + "%1 = usr\n\t" + : "=r"(result), "=r"(usr) + : "r"(src), "r"(p) + : "r2", "usr", "memory"); + *ovf_result = (usr & 1); + return result; +} + +int read_usr_overflow(void) +{ + int result; + asm volatile("%0 = usr\n\t" : "=r"(result)); + return result & 1; +} + + +jmp_buf jmp_env; +int usr_overflow; + +static void sig_segv(int sig, siginfo_t *info, void *puc) +{ + usr_overflow = read_usr_overflow(); + longjmp(jmp_env, 1); +} + +int main() +{ + struct sigaction act; + int ovf; + + /* SIGSEGV test */ + act.sa_sigaction = sig_segv; + sigemptyset(&act.sa_mask); + act.sa_flags = SA_SIGINFO; + sigaction(SIGSEGV, &act, NULL); + if (setjmp(jmp_env) == 0) { + satub(300, 0, &ovf); + } + + act.sa_handler = SIG_DFL; + sigemptyset(&act.sa_mask); + act.sa_flags = 0; + + check(usr_overflow, 0); + + puts(err ? "FAIL" : "PASS"); + return err ? EXIT_FAILURE : EXIT_SUCCESS; +} diff --git a/tests/tcg/hexagon/scatter_gather.c b/tests/tcg/hexagon/scatter_gather.c new file mode 100644 index 0000000000..b93eb18133 --- /dev/null +++ b/tests/tcg/hexagon/scatter_gather.c @@ -0,0 +1,1011 @@ +/* + * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . 
+ */ + +/* + * This example tests the HVX scatter/gather instructions + * + * See section 5.13 of the V68 HVX Programmer's Reference + * + * There are 3 main classes operations + * _16 16-bit elements and 16-bit offsets + * _32 32-bit elements and 32-bit offsets + * _16_32 16-bit elements and 32-bit offsets + * + * There are also masked and accumulate versions + */ + +#include +#include +#include +#include + +typedef long HVX_Vector __attribute__((__vector_size__(128))) + __attribute__((aligned(128))); +typedef long HVX_VectorPair __attribute__((__vector_size__(256))) + __attribute__((aligned(128))); +typedef long HVX_VectorPred __attribute__((__vector_size__(128))) + __attribute__((aligned(128))); + +#define VSCATTER_16(BASE, RGN, OFF, VALS) \ + __builtin_HEXAGON_V6_vscattermh_128B((int)BASE, RGN, OFF, VALS) +#define VSCATTER_16_MASKED(MASK, BASE, RGN, OFF, VALS) \ + __builtin_HEXAGON_V6_vscattermhq_128B(MASK, (int)BASE, RGN, OFF, VALS) +#define VSCATTER_32(BASE, RGN, OFF, VALS) \ + __builtin_HEXAGON_V6_vscattermw_128B((int)BASE, RGN, OFF, VALS) +#define VSCATTER_32_MASKED(MASK, BASE, RGN, OFF, VALS) \ + __builtin_HEXAGON_V6_vscattermwq_128B(MASK, (int)BASE, RGN, OFF, VALS) +#define VSCATTER_16_32(BASE, RGN, OFF, VALS) \ + __builtin_HEXAGON_V6_vscattermhw_128B((int)BASE, RGN, OFF, VALS) +#define VSCATTER_16_32_MASKED(MASK, BASE, RGN, OFF, VALS) \ + __builtin_HEXAGON_V6_vscattermhwq_128B(MASK, (int)BASE, RGN, OFF, VALS) +#define VSCATTER_16_ACC(BASE, RGN, OFF, VALS) \ + __builtin_HEXAGON_V6_vscattermh_add_128B((int)BASE, RGN, OFF, VALS) +#define VSCATTER_32_ACC(BASE, RGN, OFF, VALS) \ + __builtin_HEXAGON_V6_vscattermw_add_128B((int)BASE, RGN, OFF, VALS) +#define VSCATTER_16_32_ACC(BASE, RGN, OFF, VALS) \ + __builtin_HEXAGON_V6_vscattermhw_add_128B((int)BASE, RGN, OFF, VALS) + +#define VGATHER_16(DSTADDR, BASE, RGN, OFF) \ + __builtin_HEXAGON_V6_vgathermh_128B(DSTADDR, (int)BASE, RGN, OFF) +#define VGATHER_16_MASKED(DSTADDR, MASK, BASE, RGN, OFF) \ + __builtin_HEXAGON_V6_vgathermhq_128B(DSTADDR, MASK, (int)BASE, RGN, OFF) +#define VGATHER_32(DSTADDR, BASE, RGN, OFF) \ + __builtin_HEXAGON_V6_vgathermw_128B(DSTADDR, (int)BASE, RGN, OFF) +#define VGATHER_32_MASKED(DSTADDR, MASK, BASE, RGN, OFF) \ + __builtin_HEXAGON_V6_vgathermwq_128B(DSTADDR, MASK, (int)BASE, RGN, OFF) +#define VGATHER_16_32(DSTADDR, BASE, RGN, OFF) \ + __builtin_HEXAGON_V6_vgathermhw_128B(DSTADDR, (int)BASE, RGN, OFF) +#define VGATHER_16_32_MASKED(DSTADDR, MASK, BASE, RGN, OFF) \ + __builtin_HEXAGON_V6_vgathermhwq_128B(DSTADDR, MASK, (int)BASE, RGN, OFF) + +#define VSHUFF_H(V) \ + __builtin_HEXAGON_V6_vshuffh_128B(V) +#define VSPLAT_H(X) \ + __builtin_HEXAGON_V6_lvsplath_128B(X) +#define VAND_VAL(PRED, VAL) \ + __builtin_HEXAGON_V6_vandvrt_128B(PRED, VAL) +#define VDEAL_H(V) \ + __builtin_HEXAGON_V6_vdealh_128B(V) + +int err; + +/* define the number of rows/cols in a square matrix */ +#define MATRIX_SIZE 64 + +/* define the size of the scatter buffer */ +#define SCATTER_BUFFER_SIZE (MATRIX_SIZE * MATRIX_SIZE) + +/* fake vtcm - put buffers together and force alignment */ +static struct { + unsigned short vscatter16[SCATTER_BUFFER_SIZE]; + unsigned short vgather16[MATRIX_SIZE]; + unsigned int vscatter32[SCATTER_BUFFER_SIZE]; + unsigned int vgather32[MATRIX_SIZE]; + unsigned short vscatter16_32[SCATTER_BUFFER_SIZE]; + unsigned short vgather16_32[MATRIX_SIZE]; +} vtcm __attribute__((aligned(0x10000))); + +/* declare the arrays of reference values */ +unsigned short vscatter16_ref[SCATTER_BUFFER_SIZE]; +unsigned short 
vgather16_ref[MATRIX_SIZE]; +unsigned int vscatter32_ref[SCATTER_BUFFER_SIZE]; +unsigned int vgather32_ref[MATRIX_SIZE]; +unsigned short vscatter16_32_ref[SCATTER_BUFFER_SIZE]; +unsigned short vgather16_32_ref[MATRIX_SIZE]; + +/* declare the arrays of offsets */ +unsigned short half_offsets[MATRIX_SIZE]; +unsigned int word_offsets[MATRIX_SIZE]; + +/* declare the arrays of values */ +unsigned short half_values[MATRIX_SIZE]; +unsigned short half_values_acc[MATRIX_SIZE]; +unsigned short half_values_masked[MATRIX_SIZE]; +unsigned int word_values[MATRIX_SIZE]; +unsigned int word_values_acc[MATRIX_SIZE]; +unsigned int word_values_masked[MATRIX_SIZE]; + +/* declare the arrays of predicates */ +unsigned short half_predicates[MATRIX_SIZE]; +unsigned int word_predicates[MATRIX_SIZE]; + +/* make this big enough for all the intrinsics */ +const size_t region_len = sizeof(vtcm); + +/* optionally add sync instructions */ +#define SYNC_VECTOR 1 + +static void sync_scatter(void *addr) +{ +#if SYNC_VECTOR + /* + * Do the scatter release followed by a dummy load to complete the + * synchronization. Normally the dummy load would be deferred as + * long as possible to minimize stalls. + */ + asm volatile("vmem(%0 + #0):scatter_release\n" : : "r"(addr)); + /* use volatile to force the load */ + volatile HVX_Vector vDummy = *(HVX_Vector *)addr; vDummy = vDummy; +#endif +} + +static void sync_gather(void *addr) +{ +#if SYNC_VECTOR + /* use volatile to force the load */ + volatile HVX_Vector vDummy = *(HVX_Vector *)addr; vDummy = vDummy; +#endif +} + +/* optionally print the results */ +#define PRINT_DATA 0 + +#define FILL_CHAR '.' + +/* fill vtcm scratch with ee */ +void prefill_vtcm_scratch(void) +{ + memset(&vtcm, FILL_CHAR, sizeof(vtcm)); +} + +/* create byte offsets to be a diagonal of the matrix with 16 bit elements */ +void create_offsets_values_preds_16(void) +{ + unsigned short half_element = 0; + unsigned short half_element_masked = 0; + char letter = 'A'; + char letter_masked = '@'; + + for (int i = 0; i < MATRIX_SIZE; i++) { + half_offsets[i] = i * (2 * MATRIX_SIZE + 2); + + half_element = 0; + half_element_masked = 0; + for (int j = 0; j < 2; j++) { + half_element |= letter << j * 8; + half_element_masked |= letter_masked << j * 8; + } + + half_values[i] = half_element; + half_values_acc[i] = ((i % 10) << 8) + (i % 10); + half_values_masked[i] = half_element_masked; + + letter++; + /* reset to 'A' */ + if (letter == 'M') { + letter = 'A'; + } + + half_predicates[i] = (i % 3 == 0 || i % 5 == 0) ? ~0 : 0; + } +} + +/* create byte offsets to be a diagonal of the matrix with 32 bit elements */ +void create_offsets_values_preds_32(void) +{ + unsigned int word_element = 0; + unsigned int word_element_masked = 0; + char letter = 'A'; + char letter_masked = '&'; + + for (int i = 0; i < MATRIX_SIZE; i++) { + word_offsets[i] = i * (4 * MATRIX_SIZE + 4); + + word_element = 0; + word_element_masked = 0; + for (int j = 0; j < 4; j++) { + word_element |= letter << j * 8; + word_element_masked |= letter_masked << j * 8; + } + + word_values[i] = word_element; + word_values_acc[i] = ((i % 10) << 8) + (i % 10); + word_values_masked[i] = word_element_masked; + + letter++; + /* reset to 'A' */ + if (letter == 'M') { + letter = 'A'; + } + + word_predicates[i] = (i % 4 == 0 || i % 7 == 0) ? 
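+        /*
+         * ~0 marks word lanes that take part in the masked scatter/gather;
+         * VAND_VAL() later converts these all-ones words into the vector
+         * predicate passed to the *_MASKED intrinsics.
+         */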
~0 : 0; + } +} + +/* + * create byte offsets to be a diagonal of the matrix with 16 bit elements + * and 32 bit offsets + */ +void create_offsets_values_preds_16_32(void) +{ + unsigned short half_element = 0; + unsigned short half_element_masked = 0; + char letter = 'D'; + char letter_masked = '$'; + + for (int i = 0; i < MATRIX_SIZE; i++) { + word_offsets[i] = i * (2 * MATRIX_SIZE + 2); + + half_element = 0; + half_element_masked = 0; + for (int j = 0; j < 2; j++) { + half_element |= letter << j * 8; + half_element_masked |= letter_masked << j * 8; + } + + half_values[i] = half_element; + half_values_acc[i] = ((i % 10) << 8) + (i % 10); + half_values_masked[i] = half_element_masked; + + letter++; + /* reset to 'A' */ + if (letter == 'P') { + letter = 'D'; + } + + half_predicates[i] = (i % 2 == 0 || i % 13 == 0) ? ~0 : 0; + } +} + +/* scatter the 16 bit elements using intrinsics */ +void vector_scatter_16(void) +{ + /* copy the offsets and values to vectors */ + HVX_Vector offsets = *(HVX_Vector *)half_offsets; + HVX_Vector values = *(HVX_Vector *)half_values; + + VSCATTER_16(&vtcm.vscatter16, region_len, offsets, values); + + sync_scatter(vtcm.vscatter16); +} + +/* scatter-accumulate the 16 bit elements using intrinsics */ +void vector_scatter_16_acc(void) +{ + /* copy the offsets and values to vectors */ + HVX_Vector offsets = *(HVX_Vector *)half_offsets; + HVX_Vector values = *(HVX_Vector *)half_values_acc; + + VSCATTER_16_ACC(&vtcm.vscatter16, region_len, offsets, values); + + sync_scatter(vtcm.vscatter16); +} + +/* scatter the 16 bit elements using intrinsics */ +void vector_scatter_16_masked(void) +{ + /* copy the offsets and values to vectors */ + HVX_Vector offsets = *(HVX_Vector *)half_offsets; + HVX_Vector values = *(HVX_Vector *)half_values_masked; + HVX_Vector pred_reg = *(HVX_Vector *)half_predicates; + HVX_VectorPred preds = VAND_VAL(pred_reg, ~0); + + VSCATTER_16_MASKED(preds, &vtcm.vscatter16, region_len, offsets, values); + + sync_scatter(vtcm.vscatter16); +} + +/* scatter the 32 bit elements using intrinsics */ +void vector_scatter_32(void) +{ + /* copy the offsets and values to vectors */ + HVX_Vector offsetslo = *(HVX_Vector *)word_offsets; + HVX_Vector offsetshi = *(HVX_Vector *)&word_offsets[MATRIX_SIZE / 2]; + HVX_Vector valueslo = *(HVX_Vector *)word_values; + HVX_Vector valueshi = *(HVX_Vector *)&word_values[MATRIX_SIZE / 2]; + + VSCATTER_32(&vtcm.vscatter32, region_len, offsetslo, valueslo); + VSCATTER_32(&vtcm.vscatter32, region_len, offsetshi, valueshi); + + sync_scatter(vtcm.vscatter32); +} + +/* scatter-acc the 32 bit elements using intrinsics */ +void vector_scatter_32_acc(void) +{ + /* copy the offsets and values to vectors */ + HVX_Vector offsetslo = *(HVX_Vector *)word_offsets; + HVX_Vector offsetshi = *(HVX_Vector *)&word_offsets[MATRIX_SIZE / 2]; + HVX_Vector valueslo = *(HVX_Vector *)word_values_acc; + HVX_Vector valueshi = *(HVX_Vector *)&word_values_acc[MATRIX_SIZE / 2]; + + VSCATTER_32_ACC(&vtcm.vscatter32, region_len, offsetslo, valueslo); + VSCATTER_32_ACC(&vtcm.vscatter32, region_len, offsetshi, valueshi); + + sync_scatter(vtcm.vscatter32); +} + +/* scatter the 32 bit elements using intrinsics */ +void vector_scatter_32_masked(void) +{ + /* copy the offsets and values to vectors */ + HVX_Vector offsetslo = *(HVX_Vector *)word_offsets; + HVX_Vector offsetshi = *(HVX_Vector *)&word_offsets[MATRIX_SIZE / 2]; + HVX_Vector valueslo = *(HVX_Vector *)word_values_masked; + HVX_Vector valueshi = *(HVX_Vector *)&word_values_masked[MATRIX_SIZE / 2]; + 
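+    /*
+     * The 64 word offsets, values and predicates each span two HVX vectors
+     * (a low and a high half of the matrix diagonal), so the masked scatter
+     * is issued once per half below.
+     */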
HVX_Vector pred_reglo = *(HVX_Vector *)word_predicates; + HVX_Vector pred_reghi = *(HVX_Vector *)&word_predicates[MATRIX_SIZE / 2]; + HVX_VectorPred predslo = VAND_VAL(pred_reglo, ~0); + HVX_VectorPred predshi = VAND_VAL(pred_reghi, ~0); + + VSCATTER_32_MASKED(predslo, &vtcm.vscatter32, region_len, offsetslo, + valueslo); + VSCATTER_32_MASKED(predshi, &vtcm.vscatter32, region_len, offsetshi, + valueshi); + + sync_scatter(vtcm.vscatter16); +} + +/* scatter the 16 bit elements with 32 bit offsets using intrinsics */ +void vector_scatter_16_32(void) +{ + HVX_VectorPair offsets; + HVX_Vector values; + + /* get the word offsets in a vector pair */ + offsets = *(HVX_VectorPair *)word_offsets; + + /* these values need to be shuffled for the scatter */ + values = *(HVX_Vector *)half_values; + values = VSHUFF_H(values); + + VSCATTER_16_32(&vtcm.vscatter16_32, region_len, offsets, values); + + sync_scatter(vtcm.vscatter16_32); +} + +/* scatter-acc the 16 bit elements with 32 bit offsets using intrinsics */ +void vector_scatter_16_32_acc(void) +{ + HVX_VectorPair offsets; + HVX_Vector values; + + /* get the word offsets in a vector pair */ + offsets = *(HVX_VectorPair *)word_offsets; + + /* these values need to be shuffled for the scatter */ + values = *(HVX_Vector *)half_values_acc; + values = VSHUFF_H(values); + + VSCATTER_16_32_ACC(&vtcm.vscatter16_32, region_len, offsets, values); + + sync_scatter(vtcm.vscatter16_32); +} + +/* masked scatter the 16 bit elements with 32 bit offsets using intrinsics */ +void vector_scatter_16_32_masked(void) +{ + HVX_VectorPair offsets; + HVX_Vector values; + HVX_Vector pred_reg; + + /* get the word offsets in a vector pair */ + offsets = *(HVX_VectorPair *)word_offsets; + + /* these values need to be shuffled for the scatter */ + values = *(HVX_Vector *)half_values_masked; + values = VSHUFF_H(values); + + pred_reg = *(HVX_Vector *)half_predicates; + pred_reg = VSHUFF_H(pred_reg); + HVX_VectorPred preds = VAND_VAL(pred_reg, ~0); + + VSCATTER_16_32_MASKED(preds, &vtcm.vscatter16_32, region_len, offsets, + values); + + sync_scatter(vtcm.vscatter16_32); +} + +/* gather the elements from the scatter16 buffer */ +void vector_gather_16(void) +{ + HVX_Vector *vgather = (HVX_Vector *)&vtcm.vgather16; + HVX_Vector offsets = *(HVX_Vector *)half_offsets; + + VGATHER_16(vgather, &vtcm.vscatter16, region_len, offsets); + + sync_gather(vgather); +} + +static unsigned short gather_16_masked_init(void) +{ + char letter = '?'; + return letter | (letter << 8); +} + +void vector_gather_16_masked(void) +{ + HVX_Vector *vgather = (HVX_Vector *)&vtcm.vgather16; + HVX_Vector offsets = *(HVX_Vector *)half_offsets; + HVX_Vector pred_reg = *(HVX_Vector *)half_predicates; + HVX_VectorPred preds = VAND_VAL(pred_reg, ~0); + + *vgather = VSPLAT_H(gather_16_masked_init()); + VGATHER_16_MASKED(vgather, preds, &vtcm.vscatter16, region_len, offsets); + + sync_gather(vgather); +} + +/* gather the elements from the scatter32 buffer */ +void vector_gather_32(void) +{ + HVX_Vector *vgatherlo = (HVX_Vector *)&vtcm.vgather32; + HVX_Vector *vgatherhi = + (HVX_Vector *)((int)&vtcm.vgather32 + (MATRIX_SIZE * 2)); + HVX_Vector offsetslo = *(HVX_Vector *)word_offsets; + HVX_Vector offsetshi = *(HVX_Vector *)&word_offsets[MATRIX_SIZE / 2]; + + VGATHER_32(vgatherlo, &vtcm.vscatter32, region_len, offsetslo); + VGATHER_32(vgatherhi, &vtcm.vscatter32, region_len, offsetshi); + + sync_gather(vgatherhi); +} + +static unsigned int gather_32_masked_init(void) +{ + char letter = '?'; + return letter | (letter << 8) | 
(letter << 16) | (letter << 24); +} + +void vector_gather_32_masked(void) +{ + HVX_Vector *vgatherlo = (HVX_Vector *)&vtcm.vgather32; + HVX_Vector *vgatherhi = + (HVX_Vector *)((int)&vtcm.vgather32 + (MATRIX_SIZE * 2)); + HVX_Vector offsetslo = *(HVX_Vector *)word_offsets; + HVX_Vector offsetshi = *(HVX_Vector *)&word_offsets[MATRIX_SIZE / 2]; + HVX_Vector pred_reglo = *(HVX_Vector *)word_predicates; + HVX_VectorPred predslo = VAND_VAL(pred_reglo, ~0); + HVX_Vector pred_reghi = *(HVX_Vector *)&word_predicates[MATRIX_SIZE / 2]; + HVX_VectorPred predshi = VAND_VAL(pred_reghi, ~0); + + *vgatherlo = VSPLAT_H(gather_32_masked_init()); + *vgatherhi = VSPLAT_H(gather_32_masked_init()); + VGATHER_32_MASKED(vgatherlo, predslo, &vtcm.vscatter32, region_len, + offsetslo); + VGATHER_32_MASKED(vgatherhi, predshi, &vtcm.vscatter32, region_len, + offsetshi); + + sync_gather(vgatherlo); + sync_gather(vgatherhi); +} + +/* gather the elements from the scatter16_32 buffer */ +void vector_gather_16_32(void) +{ + HVX_Vector *vgather; + HVX_VectorPair offsets; + HVX_Vector values; + + /* get the vtcm address to gather from */ + vgather = (HVX_Vector *)&vtcm.vgather16_32; + + /* get the word offsets in a vector pair */ + offsets = *(HVX_VectorPair *)word_offsets; + + VGATHER_16_32(vgather, &vtcm.vscatter16_32, region_len, offsets); + + /* deal the elements to get the order back */ + values = *(HVX_Vector *)vgather; + values = VDEAL_H(values); + + /* write it back to vtcm address */ + *(HVX_Vector *)vgather = values; +} + +void vector_gather_16_32_masked(void) +{ + HVX_Vector *vgather; + HVX_VectorPair offsets; + HVX_Vector pred_reg; + HVX_VectorPred preds; + HVX_Vector values; + + /* get the vtcm address to gather from */ + vgather = (HVX_Vector *)&vtcm.vgather16_32; + + /* get the word offsets in a vector pair */ + offsets = *(HVX_VectorPair *)word_offsets; + pred_reg = *(HVX_Vector *)half_predicates; + pred_reg = VSHUFF_H(pred_reg); + preds = VAND_VAL(pred_reg, ~0); + + *vgather = VSPLAT_H(gather_16_masked_init()); + VGATHER_16_32_MASKED(vgather, preds, &vtcm.vscatter16_32, region_len, + offsets); + + /* deal the elements to get the order back */ + values = *(HVX_Vector *)vgather; + values = VDEAL_H(values); + + /* write it back to vtcm address */ + *(HVX_Vector *)vgather = values; +} + +static void check_buffer(const char *name, void *c, void *r, size_t size) +{ + char *check = (char *)c; + char *ref = (char *)r; + for (int i = 0; i < size; i++) { + if (check[i] != ref[i]) { + printf("ERROR %s [%d]: 0x%x (%c) != 0x%x (%c)\n", name, i, + check[i], check[i], ref[i], ref[i]); + err++; + } + } +} + +/* + * These scalar functions are the C equivalents of the vector functions that + * use HVX + */ + +/* scatter the 16 bit elements using C */ +void scalar_scatter_16(unsigned short *vscatter16) +{ + for (int i = 0; i < MATRIX_SIZE; ++i) { + vscatter16[half_offsets[i] / 2] = half_values[i]; + } +} + +void check_scatter_16() +{ + memset(vscatter16_ref, FILL_CHAR, + SCATTER_BUFFER_SIZE * sizeof(unsigned short)); + scalar_scatter_16(vscatter16_ref); + check_buffer(__func__, vtcm.vscatter16, vscatter16_ref, + SCATTER_BUFFER_SIZE * sizeof(unsigned short)); +} + +/* scatter the 16 bit elements using C */ +void scalar_scatter_16_acc(unsigned short *vscatter16) +{ + for (int i = 0; i < MATRIX_SIZE; ++i) { + vscatter16[half_offsets[i] / 2] += half_values_acc[i]; + } +} + +void check_scatter_16_acc() +{ + memset(vscatter16_ref, FILL_CHAR, + SCATTER_BUFFER_SIZE * sizeof(unsigned short)); + scalar_scatter_16(vscatter16_ref); + 
scalar_scatter_16_acc(vscatter16_ref); + check_buffer(__func__, vtcm.vscatter16, vscatter16_ref, + SCATTER_BUFFER_SIZE * sizeof(unsigned short)); +} + +/* scatter the 16 bit elements using C */ +void scalar_scatter_16_masked(unsigned short *vscatter16) +{ + for (int i = 0; i < MATRIX_SIZE; i++) { + if (half_predicates[i]) { + vscatter16[half_offsets[i] / 2] = half_values_masked[i]; + } + } + +} + +void check_scatter_16_masked() +{ + memset(vscatter16_ref, FILL_CHAR, + SCATTER_BUFFER_SIZE * sizeof(unsigned short)); + scalar_scatter_16(vscatter16_ref); + scalar_scatter_16_acc(vscatter16_ref); + scalar_scatter_16_masked(vscatter16_ref); + check_buffer(__func__, vtcm.vscatter16, vscatter16_ref, + SCATTER_BUFFER_SIZE * sizeof(unsigned short)); +} + +/* scatter the 32 bit elements using C */ +void scalar_scatter_32(unsigned int *vscatter32) +{ + for (int i = 0; i < MATRIX_SIZE; ++i) { + vscatter32[word_offsets[i] / 4] = word_values[i]; + } +} + +void check_scatter_32() +{ + memset(vscatter32_ref, FILL_CHAR, + SCATTER_BUFFER_SIZE * sizeof(unsigned int)); + scalar_scatter_32(vscatter32_ref); + check_buffer(__func__, vtcm.vscatter32, vscatter32_ref, + SCATTER_BUFFER_SIZE * sizeof(unsigned int)); +} + +/* scatter the 32 bit elements using C */ +void scalar_scatter_32_acc(unsigned int *vscatter32) +{ + for (int i = 0; i < MATRIX_SIZE; ++i) { + vscatter32[word_offsets[i] / 4] += word_values_acc[i]; + } +} + +void check_scatter_32_acc() +{ + memset(vscatter32_ref, FILL_CHAR, + SCATTER_BUFFER_SIZE * sizeof(unsigned int)); + scalar_scatter_32(vscatter32_ref); + scalar_scatter_32_acc(vscatter32_ref); + check_buffer(__func__, vtcm.vscatter32, vscatter32_ref, + SCATTER_BUFFER_SIZE * sizeof(unsigned int)); +} + +/* scatter the 32 bit elements using C */ +void scalar_scatter_32_masked(unsigned int *vscatter32) +{ + for (int i = 0; i < MATRIX_SIZE; i++) { + if (word_predicates[i]) { + vscatter32[word_offsets[i] / 4] = word_values_masked[i]; + } + } +} + +void check_scatter_32_masked() +{ + memset(vscatter32_ref, FILL_CHAR, + SCATTER_BUFFER_SIZE * sizeof(unsigned int)); + scalar_scatter_32(vscatter32_ref); + scalar_scatter_32_acc(vscatter32_ref); + scalar_scatter_32_masked(vscatter32_ref); + check_buffer(__func__, vtcm.vscatter32, vscatter32_ref, + SCATTER_BUFFER_SIZE * sizeof(unsigned int)); +} + +/* scatter the 32 bit elements using C */ +void scalar_scatter_16_32(unsigned short *vscatter16_32) +{ + for (int i = 0; i < MATRIX_SIZE; ++i) { + vscatter16_32[word_offsets[i] / 2] = half_values[i]; + } +} + +void check_scatter_16_32() +{ + memset(vscatter16_32_ref, FILL_CHAR, + SCATTER_BUFFER_SIZE * sizeof(unsigned short)); + scalar_scatter_16_32(vscatter16_32_ref); + check_buffer(__func__, vtcm.vscatter16_32, vscatter16_32_ref, + SCATTER_BUFFER_SIZE * sizeof(unsigned short)); +} + +/* scatter the 32 bit elements using C */ +void scalar_scatter_16_32_acc(unsigned short *vscatter16_32) +{ + for (int i = 0; i < MATRIX_SIZE; ++i) { + vscatter16_32[word_offsets[i] / 2] += half_values_acc[i]; + } +} + +void check_scatter_16_32_acc() +{ + memset(vscatter16_32_ref, FILL_CHAR, + SCATTER_BUFFER_SIZE * sizeof(unsigned short)); + scalar_scatter_16_32(vscatter16_32_ref); + scalar_scatter_16_32_acc(vscatter16_32_ref); + check_buffer(__func__, vtcm.vscatter16_32, vscatter16_32_ref, + SCATTER_BUFFER_SIZE * sizeof(unsigned short)); +} + +void scalar_scatter_16_32_masked(unsigned short *vscatter16_32) +{ + for (int i = 0; i < MATRIX_SIZE; i++) { + if (half_predicates[i]) { + vscatter16_32[word_offsets[i] / 2] = 
half_values_masked[i]; + } + } +} + +void check_scatter_16_32_masked() +{ + memset(vscatter16_32_ref, FILL_CHAR, + SCATTER_BUFFER_SIZE * sizeof(unsigned short)); + scalar_scatter_16_32(vscatter16_32_ref); + scalar_scatter_16_32_acc(vscatter16_32_ref); + scalar_scatter_16_32_masked(vscatter16_32_ref); + check_buffer(__func__, vtcm.vscatter16_32, vscatter16_32_ref, + SCATTER_BUFFER_SIZE * sizeof(unsigned short)); +} + +/* gather the elements from the scatter buffer using C */ +void scalar_gather_16(unsigned short *vgather16) +{ + for (int i = 0; i < MATRIX_SIZE; ++i) { + vgather16[i] = vtcm.vscatter16[half_offsets[i] / 2]; + } +} + +void check_gather_16() +{ + memset(vgather16_ref, 0, MATRIX_SIZE * sizeof(unsigned short)); + scalar_gather_16(vgather16_ref); + check_buffer(__func__, vtcm.vgather16, vgather16_ref, + MATRIX_SIZE * sizeof(unsigned short)); +} + +void scalar_gather_16_masked(unsigned short *vgather16) +{ + for (int i = 0; i < MATRIX_SIZE; ++i) { + if (half_predicates[i]) { + vgather16[i] = vtcm.vscatter16[half_offsets[i] / 2]; + } + } +} + +void check_gather_16_masked() +{ + memset(vgather16_ref, gather_16_masked_init(), + MATRIX_SIZE * sizeof(unsigned short)); + scalar_gather_16_masked(vgather16_ref); + check_buffer(__func__, vtcm.vgather16, vgather16_ref, + MATRIX_SIZE * sizeof(unsigned short)); +} + +/* gather the elements from the scatter buffer using C */ +void scalar_gather_32(unsigned int *vgather32) +{ + for (int i = 0; i < MATRIX_SIZE; ++i) { + vgather32[i] = vtcm.vscatter32[word_offsets[i] / 4]; + } +} + +void check_gather_32(void) +{ + memset(vgather32_ref, 0, MATRIX_SIZE * sizeof(unsigned int)); + scalar_gather_32(vgather32_ref); + check_buffer(__func__, vtcm.vgather32, vgather32_ref, + MATRIX_SIZE * sizeof(unsigned int)); +} + +void scalar_gather_32_masked(unsigned int *vgather32) +{ + for (int i = 0; i < MATRIX_SIZE; ++i) { + if (word_predicates[i]) { + vgather32[i] = vtcm.vscatter32[word_offsets[i] / 4]; + } + } +} + + +void check_gather_32_masked(void) +{ + memset(vgather32_ref, gather_32_masked_init(), + MATRIX_SIZE * sizeof(unsigned int)); + scalar_gather_32_masked(vgather32_ref); + check_buffer(__func__, vtcm.vgather32, + vgather32_ref, MATRIX_SIZE * sizeof(unsigned int)); +} + +/* gather the elements from the scatter buffer using C */ +void scalar_gather_16_32(unsigned short *vgather16_32) +{ + for (int i = 0; i < MATRIX_SIZE; ++i) { + vgather16_32[i] = vtcm.vscatter16_32[word_offsets[i] / 2]; + } +} + +void check_gather_16_32(void) +{ + memset(vgather16_32_ref, 0, MATRIX_SIZE * sizeof(unsigned short)); + scalar_gather_16_32(vgather16_32_ref); + check_buffer(__func__, vtcm.vgather16_32, vgather16_32_ref, + MATRIX_SIZE * sizeof(unsigned short)); +} + +void scalar_gather_16_32_masked(unsigned short *vgather16_32) +{ + for (int i = 0; i < MATRIX_SIZE; ++i) { + if (half_predicates[i]) { + vgather16_32[i] = vtcm.vscatter16_32[word_offsets[i] / 2]; + } + } + +} + +void check_gather_16_32_masked(void) +{ + memset(vgather16_32_ref, gather_16_masked_init(), + MATRIX_SIZE * sizeof(unsigned short)); + scalar_gather_16_32_masked(vgather16_32_ref); + check_buffer(__func__, vtcm.vgather16_32, vgather16_32_ref, + MATRIX_SIZE * sizeof(unsigned short)); +} + +/* print scatter16 buffer */ +void print_scatter16_buffer(void) +{ + if (PRINT_DATA) { + printf("\n\nPrinting the 16 bit scatter buffer"); + + for (int i = 0; i < SCATTER_BUFFER_SIZE; i++) { + if ((i % MATRIX_SIZE) == 0) { + printf("\n"); + } + for (int j = 0; j < 2; j++) { + printf("%c", (char)((vtcm.vscatter16[i] >> j * 
8) & 0xff)); + } + printf(" "); + } + printf("\n"); + } +} + +/* print the gather 16 buffer */ +void print_gather_result_16(void) +{ + if (PRINT_DATA) { + printf("\n\nPrinting the 16 bit gather result\n"); + + for (int i = 0; i < MATRIX_SIZE; i++) { + for (int j = 0; j < 2; j++) { + printf("%c", (char)((vtcm.vgather16[i] >> j * 8) & 0xff)); + } + printf(" "); + } + printf("\n"); + } +} + +/* print the scatter32 buffer */ +void print_scatter32_buffer(void) +{ + if (PRINT_DATA) { + printf("\n\nPrinting the 32 bit scatter buffer"); + + for (int i = 0; i < SCATTER_BUFFER_SIZE; i++) { + if ((i % MATRIX_SIZE) == 0) { + printf("\n"); + } + for (int j = 0; j < 4; j++) { + printf("%c", (char)((vtcm.vscatter32[i] >> j * 8) & 0xff)); + } + printf(" "); + } + printf("\n"); + } +} + +/* print the gather 32 buffer */ +void print_gather_result_32(void) +{ + if (PRINT_DATA) { + printf("\n\nPrinting the 32 bit gather result\n"); + + for (int i = 0; i < MATRIX_SIZE; i++) { + for (int j = 0; j < 4; j++) { + printf("%c", (char)((vtcm.vgather32[i] >> j * 8) & 0xff)); + } + printf(" "); + } + printf("\n"); + } +} + +/* print the scatter16_32 buffer */ +void print_scatter16_32_buffer(void) +{ + if (PRINT_DATA) { + printf("\n\nPrinting the 16_32 bit scatter buffer"); + + for (int i = 0; i < SCATTER_BUFFER_SIZE; i++) { + if ((i % MATRIX_SIZE) == 0) { + printf("\n"); + } + for (int j = 0; j < 2; j++) { + printf("%c", + (unsigned char)((vtcm.vscatter16_32[i] >> j * 8) & 0xff)); + } + printf(" "); + } + printf("\n"); + } +} + +/* print the gather 16_32 buffer */ +void print_gather_result_16_32(void) +{ + if (PRINT_DATA) { + printf("\n\nPrinting the 16_32 bit gather result\n"); + + for (int i = 0; i < MATRIX_SIZE; i++) { + for (int j = 0; j < 2; j++) { + printf("%c", + (unsigned char)((vtcm.vgather16_32[i] >> j * 8) & 0xff)); + } + printf(" "); + } + printf("\n"); + } +} + +int main() +{ + prefill_vtcm_scratch(); + + /* 16 bit elements with 16 bit offsets */ + create_offsets_values_preds_16(); + + vector_scatter_16(); + print_scatter16_buffer(); + check_scatter_16(); + + vector_gather_16(); + print_gather_result_16(); + check_gather_16(); + + vector_gather_16_masked(); + print_gather_result_16(); + check_gather_16_masked(); + + vector_scatter_16_acc(); + print_scatter16_buffer(); + check_scatter_16_acc(); + + vector_scatter_16_masked(); + print_scatter16_buffer(); + check_scatter_16_masked(); + + /* 32 bit elements with 32 bit offsets */ + create_offsets_values_preds_32(); + + vector_scatter_32(); + print_scatter32_buffer(); + check_scatter_32(); + + vector_gather_32(); + print_gather_result_32(); + check_gather_32(); + + vector_gather_32_masked(); + print_gather_result_32(); + check_gather_32_masked(); + + vector_scatter_32_acc(); + print_scatter32_buffer(); + check_scatter_32_acc(); + + vector_scatter_32_masked(); + print_scatter32_buffer(); + check_scatter_32_masked(); + + /* 16 bit elements with 32 bit offsets */ + create_offsets_values_preds_16_32(); + + vector_scatter_16_32(); + print_scatter16_32_buffer(); + check_scatter_16_32(); + + vector_gather_16_32(); + print_gather_result_16_32(); + check_gather_16_32(); + + vector_gather_16_32_masked(); + print_gather_result_16_32(); + check_gather_16_32_masked(); + + vector_scatter_16_32_acc(); + print_scatter16_32_buffer(); + check_scatter_16_32_acc(); + + vector_scatter_16_32_masked(); + print_scatter16_32_buffer(); + check_scatter_16_32_masked(); + + puts(err ? 
"FAIL" : "PASS"); + return err; +} diff --git a/tests/tcg/hexagon/vector_add_int.c b/tests/tcg/hexagon/vector_add_int.c new file mode 100644 index 0000000000..d6010ea14b --- /dev/null +++ b/tests/tcg/hexagon/vector_add_int.c @@ -0,0 +1,61 @@ +/* + * Copyright(c) 2019-2021 Qualcomm Innovation Center, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ + +#include + +int gA[401]; +int gB[401]; +int gC[401]; + +void vector_add_int() +{ + int i; + for (i = 0; i < 400; i++) { + gA[i] = gB[i] + gC[i]; + } +} + +int main() +{ + int error = 0; + int i; + for (i = 0; i < 400; i++) { + gB[i] = i * 2; + gC[i] = i * 3; + } + gA[400] = 17; + vector_add_int(); + for (i = 0; i < 400; i++) { + if (gA[i] != i * 5) { + error++; + printf("ERROR: gB[%d] = %d\t", i, gB[i]); + printf("gC[%d] = %d\t", i, gC[i]); + printf("gA[%d] = %d\n", i, gA[i]); + } + } + if (gA[400] != 17) { + error++; + printf("ERROR: Overran the buffer\n"); + } + if (!error) { + printf("PASS\n"); + return 0; + } else { + printf("FAIL\n"); + return 1; + } +} diff --git a/tests/tcg/hppa/Makefile.target b/tests/tcg/hppa/Makefile.target index 473864d1d4..d0d5e0e257 100644 --- a/tests/tcg/hppa/Makefile.target +++ b/tests/tcg/hppa/Makefile.target @@ -5,3 +5,10 @@ # On parisc Linux supports 4K/16K/64K (but currently only 4k works) EXTRA_RUNS+=run-test-mmap-4096 # run-test-mmap-16384 run-test-mmap-65536 +# This triggers failures for hppa-linux about 1% of the time +# HPPA is the odd target that can't use the sigtramp page; +# it requires the full vdso with dwarf2 unwind info. 
+run-signals: signals + $(call skip-test, $<, "BROKEN awaiting vdso support") +run-plugin-signals-with-%: + $(call skip-test, $<, "BROKEN awaiting vdso support") diff --git a/tests/tcg/i386/Makefile.target b/tests/tcg/i386/Makefile.target index a053ca3f15..38c10379af 100644 --- a/tests/tcg/i386/Makefile.target +++ b/tests/tcg/i386/Makefile.target @@ -65,9 +65,6 @@ run-plugin-%-with-libinsn.so: -d plugin -D $*-with-libinsn.so.pout $*, \ "$* (inline) on $(TARGET_NAME)") -run-plugin-signals-with-libinsn.so: - $(call skip-test, $<, "BROKEN awaiting sigframe clean-ups and vdso support") - # Update TESTS I386_TESTS:=$(filter-out $(SKIP_I386_TESTS), $(ALL_X86_TESTS)) TESTS=$(MULTIARCH_TESTS) $(I386_TESTS) diff --git a/tests/tcg/multiarch/Makefile.target b/tests/tcg/multiarch/Makefile.target index 85a6fb7a2e..a83efb4a9d 100644 --- a/tests/tcg/multiarch/Makefile.target +++ b/tests/tcg/multiarch/Makefile.target @@ -8,9 +8,13 @@ MULTIARCH_SRC=$(SRC_PATH)/tests/tcg/multiarch # Set search path for all sources -VPATH += $(MULTIARCH_SRC) -MULTIARCH_SRCS =$(notdir $(wildcard $(MULTIARCH_SRC)/*.c)) -MULTIARCH_TESTS =$(filter-out float_helpers, $(MULTIARCH_SRCS:.c=)) +VPATH += $(MULTIARCH_SRC) +MULTIARCH_SRCS = $(notdir $(wildcard $(MULTIARCH_SRC)/*.c)) +ifneq ($(CONFIG_LINUX),) +VPATH += $(MULTIARCH_SRC)/linux +MULTIARCH_SRCS += $(notdir $(wildcard $(MULTIARCH_SRC)/linux/*.c)) +endif +MULTIARCH_TESTS = $(MULTIARCH_SRCS:.c=) # # The following are any additional rules needed to build things @@ -18,8 +22,8 @@ MULTIARCH_TESTS =$(filter-out float_helpers, $(MULTIARCH_SRCS:.c=)) float_%: LDFLAGS+=-lm -float_%: float_%.c float_helpers.c - $(CC) $(CFLAGS) $(EXTRA_CFLAGS) $< $(MULTIARCH_SRC)/float_helpers.c -o $@ $(LDFLAGS) +float_%: float_%.c libs/float_helpers.c + $(CC) $(CFLAGS) $(EXTRA_CFLAGS) $< $(MULTIARCH_SRC)/libs/float_helpers.c -o $@ $(LDFLAGS) run-float_%: float_% $(call run-test,$<, $(QEMU) $(QEMU_OPTS) $<,"$< on $(TARGET_NAME)") @@ -32,14 +36,6 @@ threadcount: LDFLAGS+=-lpthread signals: LDFLAGS+=-lrt -lpthread -# This triggers failures on s390x hosts about 4% of the time -# This triggers failures for hppa-linux about 1% of the time -run-signals: signals - $(call skip-test, $<, "BROKEN awaiting sigframe clean-ups and vdso support") - -run-plugin-signals-with-%: - $(call skip-test, $<, "BROKEN awaiting sigframe clean-ups and vdso support") - # We define the runner for test-mmap after the individual # architectures have defined their supported pages sizes. If no # additional page sizes are defined we only run the default test. 
@@ -64,8 +60,6 @@ run-gdbstub-sha1: sha1 --bin $< --test $(MULTIARCH_SRC)/gdbstub/sha1.py, \ "basic gdbstub support") -EXTRA_RUNS += run-gdbstub-sha1 - run-gdbstub-qxfer-auxv-read: sha1 $(call run-test, $@, $(GDB_SCRIPT) \ --gdb $(HAVE_GDB_BIN) \ @@ -73,11 +67,19 @@ run-gdbstub-qxfer-auxv-read: sha1 --bin $< --test $(MULTIARCH_SRC)/gdbstub/test-qxfer-auxv-read.py, \ "basic gdbstub qXfer:auxv:read support") +run-gdbstub-thread-breakpoint: testthread + $(call run-test, $@, $(GDB_SCRIPT) \ + --gdb $(HAVE_GDB_BIN) \ + --qemu $(QEMU) --qargs "$(QEMU_OPTS)" \ + --bin $< --test $(MULTIARCH_SRC)/gdbstub/test-thread-breakpoint.py, \ + "hitting a breakpoint on non-main thread") + else run-gdbstub-%: $(call skip-test, "gdbstub test $*", "need working gdb") endif -EXTRA_RUNS += run-gdbstub-sha1 run-gdbstub-qxfer-auxv-read +EXTRA_RUNS += run-gdbstub-sha1 run-gdbstub-qxfer-auxv-read \ + run-gdbstub-thread-breakpoint # ARM Compatible Semi Hosting Tests # diff --git a/tests/tcg/multiarch/gdbstub/test-thread-breakpoint.py b/tests/tcg/multiarch/gdbstub/test-thread-breakpoint.py new file mode 100644 index 0000000000..798d508bc7 --- /dev/null +++ b/tests/tcg/multiarch/gdbstub/test-thread-breakpoint.py @@ -0,0 +1,60 @@ +from __future__ import print_function +# +# Test auxiliary vector is loaded via gdbstub +# +# This is launched via tests/guest-debug/run-test.py +# + +import gdb +import sys + +failcount = 0 + +def report(cond, msg): + "Report success/fail of test" + if cond: + print ("PASS: %s" % (msg)) + else: + print ("FAIL: %s" % (msg)) + global failcount + failcount += 1 + +def run_test(): + "Run through the tests one by one" + + sym, ok = gdb.lookup_symbol("thread1_func") + gdb.execute("b thread1_func") + gdb.execute("c") + + frame = gdb.selected_frame() + report(str(frame.function()) == "thread1_func", "break @ %s"%frame) + +# +# This runs as the script it sourced (via -x, via run-test.py) +# +try: + inferior = gdb.selected_inferior() + arch = inferior.architecture() + print("ATTACHED: %s" % arch.name()) +except (gdb.error, AttributeError): + print("SKIPPING (not connected)", file=sys.stderr) + exit(0) + +if gdb.parse_and_eval('$pc') == 0: + print("SKIP: PC not set") + exit(0) + +try: + # These are not very useful in scripts + gdb.execute("set pagination off") + gdb.execute("set confirm off") + + # Run the actual tests + run_test() +except (gdb.error): + print ("GDB Exception: %s" % (sys.exc_info()[0])) + failcount += 1 + pass + +print("All tests complete: %d failures" % failcount) +exit(failcount) diff --git a/tests/tcg/multiarch/float_helpers.c b/tests/tcg/multiarch/libs/float_helpers.c similarity index 99% rename from tests/tcg/multiarch/float_helpers.c rename to tests/tcg/multiarch/libs/float_helpers.c index bc530e5732..4e68d2b659 100644 --- a/tests/tcg/multiarch/float_helpers.c +++ b/tests/tcg/multiarch/libs/float_helpers.c @@ -22,7 +22,7 @@ #include #include -#include "float_helpers.h" +#include "../float_helpers.h" #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) diff --git a/tests/tcg/multiarch/linux-test.c b/tests/tcg/multiarch/linux/linux-test.c similarity index 100% rename from tests/tcg/multiarch/linux-test.c rename to tests/tcg/multiarch/linux/linux-test.c diff --git a/tests/tcg/multiarch/sha1.c b/tests/tcg/multiarch/sha1.c index 87bfbcdf52..0081bd7657 100644 --- a/tests/tcg/multiarch/sha1.c +++ b/tests/tcg/multiarch/sha1.c @@ -43,7 +43,6 @@ void SHA1Init(SHA1_CTX* context); void SHA1Update(SHA1_CTX* context, const unsigned char* data, uint32_t len); void SHA1Final(unsigned char 
digest[20], SHA1_CTX* context); /* ================ end of sha1.h ================ */ -#include #define rol(value, bits) (((value) << (bits)) | ((value) >> (32 - (bits)))) diff --git a/tests/tcg/nios2/Makefile.target b/tests/tcg/nios2/Makefile.target new file mode 100644 index 0000000000..b38e2352b7 --- /dev/null +++ b/tests/tcg/nios2/Makefile.target @@ -0,0 +1,11 @@ +# nios2 specific test tweaks + +# Currently nios2 signal handling is broken +run-signals: signals + $(call skip-test, $<, "BROKEN") +run-plugin-signals-with-%: + $(call skip-test, $<, "BROKEN") +run-linux-test: linux-test + $(call skip-test, $<, "BROKEN") +run-plugin-linux-test-with-%: + $(call skip-test, $<, "BROKEN") diff --git a/tests/tcg/ppc64/Makefile.target b/tests/tcg/ppc64/Makefile.target index a6a4ddaeca..6ab7934fdf 100644 --- a/tests/tcg/ppc64/Makefile.target +++ b/tests/tcg/ppc64/Makefile.target @@ -23,4 +23,6 @@ run-plugin-byte_reverse-with-%: $(call skip-test, "RUN of byte_reverse ($*)", "not built") endif +PPC64_TESTS += signal_save_restore_xer + TESTS += $(PPC64_TESTS) diff --git a/tests/tcg/ppc64le/Makefile.target b/tests/tcg/ppc64le/Makefile.target index c0c14ffbad..5e65b1590d 100644 --- a/tests/tcg/ppc64le/Makefile.target +++ b/tests/tcg/ppc64le/Makefile.target @@ -22,4 +22,6 @@ run-plugin-byte_reverse-with-%: $(call skip-test, "RUN of byte_reverse ($*)", "not built") endif +PPC64LE_TESTS += signal_save_restore_xer + TESTS += $(PPC64LE_TESTS) diff --git a/tests/tcg/ppc64le/signal_save_restore_xer.c b/tests/tcg/ppc64le/signal_save_restore_xer.c new file mode 100644 index 0000000000..e4f8a07dd7 --- /dev/null +++ b/tests/tcg/ppc64le/signal_save_restore_xer.c @@ -0,0 +1,42 @@ +#include +#include +#include +#include + +#define XER_SO (1 << 31) +#define XER_OV (1 << 30) +#define XER_CA (1 << 29) +#define XER_OV32 (1 << 19) +#define XER_CA32 (1 << 18) + +uint64_t saved; + +void sigill_handler(int sig, siginfo_t *si, void *ucontext) +{ + ucontext_t *uc = ucontext; + uc->uc_mcontext.regs->nip += 4; + saved = uc->uc_mcontext.regs->xer; + uc->uc_mcontext.regs->xer |= XER_OV | XER_OV32; +} + +int main(void) +{ + uint64_t initial = XER_CA | XER_CA32, restored; + struct sigaction sa = { + .sa_sigaction = sigill_handler, + .sa_flags = SA_SIGINFO + }; + + sigaction(SIGILL, &sa, NULL); + + asm("mtspr 1, %1\n\t" + ".long 0x0\n\t" + "mfspr %0, 1\n\t" + : "=r" (restored) + : "r" (initial)); + + assert(saved == initial); + assert(restored == (XER_OV | XER_OV32 | XER_CA | XER_CA32)); + + return 0; +} diff --git a/tests/tcg/sh4/Makefile.target b/tests/tcg/sh4/Makefile.target index 9d18d44612..0e96aeff16 100644 --- a/tests/tcg/sh4/Makefile.target +++ b/tests/tcg/sh4/Makefile.target @@ -5,3 +5,16 @@ # On sh Linux supports 4k, 8k, 16k and 64k pages (but only 4k currently works) EXTRA_RUNS+=run-test-mmap-4096 # run-test-mmap-8192 run-test-mmap-16384 run-test-mmap-65536 + +# This triggers failures for sh4-linux about 10% of the time. +# Random SIGSEGV at unpredictable guest address, cause unknown. 
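Stepping back to the ppc64 test added above: signal_save_restore_xer.c loads XER through mtspr 1, executes an illegal instruction (.long 0x0) to raise SIGILL, and the handler records the XER value it finds in the signal frame, sets the OV/OV32 bits there, and advances nip past the faulting word, so the mfspr after the trap observes the modified value once sigreturn restores the frame. The two assertions therefore reduce to the arithmetic in this sketch (illustration only, not part of the patch; the masks mirror the relevant defines in that file).

```c
/* Illustration only: the values signal_save_restore_xer.c asserts. */
#include <inttypes.h>
#include <stdio.h>

#define XER_OV   (1u << 30)
#define XER_CA   (1u << 29)
#define XER_OV32 (1u << 19)
#define XER_CA32 (1u << 18)

int main(void)
{
    uint64_t initial = XER_CA | XER_CA32;  /* written to XER before the trap */
    uint64_t saved = initial;              /* what the handler must find in
                                              uc_mcontext.regs->xer */
    uint64_t restored = saved | XER_OV | XER_OV32;  /* frame bits set by the
                                                       handler, read back by
                                                       mfspr after sigreturn */

    printf("saved    = 0x%" PRIx64 "\n", saved);     /* 0x20040000 */
    printf("restored = 0x%" PRIx64 "\n", restored);  /* 0x600c0000 */
    return 0;
}
```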
+run-signals: signals + $(call skip-test, $<, "BROKEN") +run-plugin-signals-with-%: + $(call skip-test, $<, "BROKEN") + +# This test is currently broken: https://gitlab.com/qemu-project/qemu/-/issues/704 +run-linux-test: linux-test + $(call skip-test, $<, "BROKEN") +run-plugin-linux-test-with-%: + $(call skip-test, $<, "BROKEN") diff --git a/tests/tcg/x86_64/Makefile.target b/tests/tcg/x86_64/Makefile.target index 2151ea6302..d7a7385583 100644 --- a/tests/tcg/x86_64/Makefile.target +++ b/tests/tcg/x86_64/Makefile.target @@ -8,8 +8,12 @@ include $(SRC_PATH)/tests/tcg/i386/Makefile.target +ifneq ($(CONFIG_LINUX),) X86_64_TESTS += vsyscall TESTS=$(MULTIARCH_TESTS) $(X86_64_TESTS) test-x86_64 +else +TESTS=$(MULTIARCH_TESTS) +endif QEMU_OPTS += -cpu max test-x86_64: LDFLAGS+=-lm -lc diff --git a/tests/tcg/x86_64/system/kernel.ld b/tests/tcg/x86_64/system/kernel.ld index 49c12b04ae..ca5d6bd850 100644 --- a/tests/tcg/x86_64/system/kernel.ld +++ b/tests/tcg/x86_64/system/kernel.ld @@ -16,7 +16,10 @@ SECTIONS { *(.rodata) } :text - /* Keep build ID and PVH notes in same section */ + /DISCARD/ : { + *(.note.gnu*) + } + .notes : { *(.note.*) } :note diff --git a/tests/unit/check-qom-proplist.c b/tests/unit/check-qom-proplist.c index 48503e0dff..ed341088d3 100644 --- a/tests/unit/check-qom-proplist.c +++ b/tests/unit/check-qom-proplist.c @@ -488,7 +488,7 @@ static void test_dummy_badenum(void) g_assert(dobj == NULL); g_assert(err != NULL); g_assert_cmpstr(error_get_pretty(err), ==, - "Invalid parameter 'yeti'"); + "Parameter 'av' does not accept value 'yeti'"); g_assert(object_resolve_path_component(parent, "dummy0") == NULL); diff --git a/tests/unit/meson.build b/tests/unit/meson.build index 5736d285b2..acac3622ed 100644 --- a/tests/unit/meson.build +++ b/tests/unit/meson.build @@ -23,6 +23,7 @@ tests = { # all code tested by test-x86-cpuid is inside topology.h 'test-x86-cpuid': [], 'test-cutils': [], + 'test-div128': [], 'test-shift128': [], 'test-mul64': [], # all code tested by test-int128 is inside int128.h @@ -43,8 +44,9 @@ tests = { 'test-keyval': [testqapi], 'test-logging': [], 'test-uuid': [], - 'ptimer-test': ['ptimer-test-stubs.c', meson.source_root() / 'hw/core/ptimer.c'], + 'ptimer-test': ['ptimer-test-stubs.c', meson.project_source_root() / 'hw/core/ptimer.c'], 'test-qapi-util': [], + 'test-smp-parse': [qom, meson.project_source_root() / 'hw/core/machine-smp.c'], } if have_system or have_tools diff --git a/tests/unit/test-bdrv-drain.c b/tests/unit/test-bdrv-drain.c index ce071b5fc5..2d3c17e566 100644 --- a/tests/unit/test-bdrv-drain.c +++ b/tests/unit/test-bdrv-drain.c @@ -65,8 +65,9 @@ static void co_reenter_bh(void *opaque) } static int coroutine_fn bdrv_test_co_preadv(BlockDriverState *bs, - uint64_t offset, uint64_t bytes, - QEMUIOVector *qiov, int flags) + int64_t offset, int64_t bytes, + QEMUIOVector *qiov, + BdrvRequestFlags flags) { BDRVTestState *s = bs->opaque; @@ -1106,8 +1107,9 @@ static void bdrv_test_top_close(BlockDriverState *bs) } static int coroutine_fn bdrv_test_top_co_preadv(BlockDriverState *bs, - uint64_t offset, uint64_t bytes, - QEMUIOVector *qiov, int flags) + int64_t offset, int64_t bytes, + QEMUIOVector *qiov, + BdrvRequestFlags flags) { BDRVTestTopState *tts = bs->opaque; return bdrv_co_preadv(tts->wait_child, offset, bytes, qiov, flags); @@ -1855,10 +1857,10 @@ static void bdrv_replace_test_close(BlockDriverState *bs) * Set .has_read to true and return success. 
*/ static int coroutine_fn bdrv_replace_test_co_preadv(BlockDriverState *bs, - uint64_t offset, - uint64_t bytes, + int64_t offset, + int64_t bytes, QEMUIOVector *qiov, - int flags) + BdrvRequestFlags flags) { BDRVReplaceTestState *s = bs->opaque; diff --git a/tests/unit/test-block-iothread.c b/tests/unit/test-block-iothread.c index c39e70b2f5..aea660aeed 100644 --- a/tests/unit/test-block-iothread.c +++ b/tests/unit/test-block-iothread.c @@ -31,15 +31,24 @@ #include "qemu/main-loop.h" #include "iothread.h" -static int coroutine_fn bdrv_test_co_prwv(BlockDriverState *bs, - uint64_t offset, uint64_t bytes, - QEMUIOVector *qiov, int flags) +static int coroutine_fn bdrv_test_co_preadv(BlockDriverState *bs, + int64_t offset, int64_t bytes, + QEMUIOVector *qiov, + BdrvRequestFlags flags) +{ + return 0; +} + +static int coroutine_fn bdrv_test_co_pwritev(BlockDriverState *bs, + int64_t offset, int64_t bytes, + QEMUIOVector *qiov, + BdrvRequestFlags flags) { return 0; } static int coroutine_fn bdrv_test_co_pdiscard(BlockDriverState *bs, - int64_t offset, int bytes) + int64_t offset, int64_t bytes) { return 0; } @@ -66,8 +75,8 @@ static BlockDriver bdrv_test = { .format_name = "test", .instance_size = 1, - .bdrv_co_preadv = bdrv_test_co_prwv, - .bdrv_co_pwritev = bdrv_test_co_prwv, + .bdrv_co_preadv = bdrv_test_co_preadv, + .bdrv_co_pwritev = bdrv_test_co_pwritev, .bdrv_co_pdiscard = bdrv_test_co_pdiscard, .bdrv_co_truncate = bdrv_test_co_truncate, .bdrv_co_block_status = bdrv_test_co_block_status, diff --git a/tests/unit/test-blockjob.c b/tests/unit/test-blockjob.c index dcacfa6c7c..4c9e1bf1e5 100644 --- a/tests/unit/test-blockjob.c +++ b/tests/unit/test-blockjob.c @@ -230,7 +230,7 @@ static void cancel_common(CancelJob *s) ctx = job->job.aio_context; aio_context_acquire(ctx); - job_cancel_sync(&job->job); + job_cancel_sync(&job->job, true); if (sts != JOB_STATUS_CREATED && sts != JOB_STATUS_CONCLUDED) { Job *dummy = &job->job; job_dismiss(&dummy, &error_abort); diff --git a/tests/unit/test-div128.c b/tests/unit/test-div128.c new file mode 100644 index 0000000000..0bc25fe4a8 --- /dev/null +++ b/tests/unit/test-div128.c @@ -0,0 +1,197 @@ +/* + * Test 128-bit division functions + * + * Copyright (c) 2021 Instituto de Pesquisas Eldorado (eldorado.org.br) + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . 
+ */ + +#include "qemu/osdep.h" +#include "qemu/host-utils.h" + +typedef struct { + uint64_t high; + uint64_t low; + uint64_t rhigh; + uint64_t rlow; + uint64_t divisor; + uint64_t remainder; +} test_data_unsigned; + +typedef struct { + int64_t high; + uint64_t low; + int64_t rhigh; + uint64_t rlow; + int64_t divisor; + int64_t remainder; +} test_data_signed; + +static const test_data_unsigned test_table_unsigned[] = { + /* Dividend fits in 64 bits */ + { 0x0000000000000000ULL, 0x0000000000000000ULL, + 0x0000000000000000ULL, 0x0000000000000000ULL, + 0x0000000000000001ULL, 0x0000000000000000ULL}, + { 0x0000000000000000ULL, 0x0000000000000001ULL, + 0x0000000000000000ULL, 0x0000000000000001ULL, + 0x0000000000000001ULL, 0x0000000000000000ULL}, + { 0x0000000000000000ULL, 0x0000000000000003ULL, + 0x0000000000000000ULL, 0x0000000000000001ULL, + 0x0000000000000002ULL, 0x0000000000000001ULL}, + { 0x0000000000000000ULL, 0x8000000000000000ULL, + 0x0000000000000000ULL, 0x8000000000000000ULL, + 0x0000000000000001ULL, 0x0000000000000000ULL}, + { 0x0000000000000000ULL, 0xa000000000000000ULL, + 0x0000000000000000ULL, 0x0000000000000002ULL, + 0x4000000000000000ULL, 0x2000000000000000ULL}, + { 0x0000000000000000ULL, 0x8000000000000000ULL, + 0x0000000000000000ULL, 0x0000000000000001ULL, + 0x8000000000000000ULL, 0x0000000000000000ULL}, + + /* Dividend > 64 bits, with MSB 0 */ + { 0x123456789abcdefeULL, 0xefedcba987654321ULL, + 0x123456789abcdefeULL, 0xefedcba987654321ULL, + 0x0000000000000001ULL, 0x0000000000000000ULL}, + { 0x123456789abcdefeULL, 0xefedcba987654321ULL, + 0x0000000000000001ULL, 0x000000000000000dULL, + 0x123456789abcdefeULL, 0x03456789abcdf03bULL}, + { 0x123456789abcdefeULL, 0xefedcba987654321ULL, + 0x0123456789abcdefULL, 0xeefedcba98765432ULL, + 0x0000000000000010ULL, 0x0000000000000001ULL}, + + /* Dividend > 64 bits, with MSB 1 */ + { 0xfeeddccbbaa99887ULL, 0x766554433221100fULL, + 0xfeeddccbbaa99887ULL, 0x766554433221100fULL, + 0x0000000000000001ULL, 0x0000000000000000ULL}, + { 0xfeeddccbbaa99887ULL, 0x766554433221100fULL, + 0x0000000000000001ULL, 0x0000000000000000ULL, + 0xfeeddccbbaa99887ULL, 0x766554433221100fULL}, + { 0xfeeddccbbaa99887ULL, 0x766554433221100fULL, + 0x0feeddccbbaa9988ULL, 0x7766554433221100ULL, + 0x0000000000000010ULL, 0x000000000000000fULL}, + { 0xfeeddccbbaa99887ULL, 0x766554433221100fULL, + 0x000000000000000eULL, 0x00f0f0f0f0f0f35aULL, + 0x123456789abcdefeULL, 0x0f8922bc55ef90c3ULL}, + + /** + * Divisor == 64 bits, with MSB 1 + * and high 64 bits of dividend >= divisor + * (for testing normalization) + */ + { 0xfeeddccbbaa99887ULL, 0x766554433221100fULL, + 0x0000000000000001ULL, 0x0000000000000000ULL, + 0xfeeddccbbaa99887ULL, 0x766554433221100fULL}, + { 0xfeeddccbbaa99887ULL, 0x766554433221100fULL, + 0x0000000000000001ULL, 0xfddbb9977553310aULL, + 0x8000000000000001ULL, 0x78899aabbccddf05ULL}, + + /* Dividend > 64 bits, divisor almost as big */ + { 0x0000000000000001ULL, 0x23456789abcdef01ULL, + 0x0000000000000000ULL, 0x000000000000000fULL, + 0x123456789abcdefeULL, 0x123456789abcde1fULL}, +}; + +static const test_data_signed test_table_signed[] = { + /* Positive dividend, positive/negative divisors */ + { 0x0000000000000000LL, 0x0000000000bc614eULL, + 0x0000000000000000LL, 0x0000000000bc614eULL, + 0x0000000000000001LL, 0x0000000000000000LL}, + { 0x0000000000000000LL, 0x0000000000bc614eULL, + 0xffffffffffffffffLL, 0xffffffffff439eb2ULL, + 0xffffffffffffffffLL, 0x0000000000000000LL}, + { 0x0000000000000000LL, 0x0000000000bc614eULL, + 0x0000000000000000LL, 
0x00000000005e30a7ULL, + 0x0000000000000002LL, 0x0000000000000000LL}, + { 0x0000000000000000LL, 0x0000000000bc614eULL, + 0xffffffffffffffffLL, 0xffffffffffa1cf59ULL, + 0xfffffffffffffffeLL, 0x0000000000000000LL}, + { 0x0000000000000000LL, 0x0000000000bc614eULL, + 0x0000000000000000LL, 0x0000000000178c29ULL, + 0x0000000000000008LL, 0x0000000000000006LL}, + { 0x0000000000000000LL, 0x0000000000bc614eULL, + 0xffffffffffffffffLL, 0xffffffffffe873d7ULL, + 0xfffffffffffffff8LL, 0x0000000000000006LL}, + { 0x0000000000000000LL, 0x0000000000bc614eULL, + 0x0000000000000000LL, 0x000000000000550dULL, + 0x0000000000000237LL, 0x0000000000000183LL}, + { 0x0000000000000000LL, 0x0000000000bc614eULL, + 0xffffffffffffffffLL, 0xffffffffffffaaf3ULL, + 0xfffffffffffffdc9LL, 0x0000000000000183LL}, + + /* Negative dividend, positive/negative divisors */ + { 0xffffffffffffffffLL, 0xffffffffff439eb2ULL, + 0xffffffffffffffffLL, 0xffffffffff439eb2ULL, + 0x0000000000000001LL, 0x0000000000000000LL}, + { 0xffffffffffffffffLL, 0xffffffffff439eb2ULL, + 0x0000000000000000LL, 0x0000000000bc614eULL, + 0xffffffffffffffffLL, 0x0000000000000000LL}, + { 0xffffffffffffffffLL, 0xffffffffff439eb2ULL, + 0xffffffffffffffffLL, 0xffffffffffa1cf59ULL, + 0x0000000000000002LL, 0x0000000000000000LL}, + { 0xffffffffffffffffLL, 0xffffffffff439eb2ULL, + 0x0000000000000000LL, 0x00000000005e30a7ULL, + 0xfffffffffffffffeLL, 0x0000000000000000LL}, + { 0xffffffffffffffffLL, 0xffffffffff439eb2ULL, + 0xffffffffffffffffLL, 0xffffffffffe873d7ULL, + 0x0000000000000008LL, 0xfffffffffffffffaLL}, + { 0xffffffffffffffffLL, 0xffffffffff439eb2ULL, + 0x0000000000000000LL, 0x0000000000178c29ULL, + 0xfffffffffffffff8LL, 0xfffffffffffffffaLL}, + { 0xffffffffffffffffLL, 0xffffffffff439eb2ULL, + 0xffffffffffffffffLL, 0xffffffffffffaaf3ULL, + 0x0000000000000237LL, 0xfffffffffffffe7dLL}, + { 0xffffffffffffffffLL, 0xffffffffff439eb2ULL, + 0x0000000000000000LL, 0x000000000000550dULL, + 0xfffffffffffffdc9LL, 0xfffffffffffffe7dLL}, +}; + +static void test_divu128(void) +{ + int i; + uint64_t rem; + test_data_unsigned tmp; + + for (i = 0; i < ARRAY_SIZE(test_table_unsigned); ++i) { + tmp = test_table_unsigned[i]; + + rem = divu128(&tmp.low, &tmp.high, tmp.divisor); + g_assert_cmpuint(tmp.low, ==, tmp.rlow); + g_assert_cmpuint(tmp.high, ==, tmp.rhigh); + g_assert_cmpuint(rem, ==, tmp.remainder); + } +} + +static void test_divs128(void) +{ + int i; + int64_t rem; + test_data_signed tmp; + + for (i = 0; i < ARRAY_SIZE(test_table_signed); ++i) { + tmp = test_table_signed[i]; + + rem = divs128(&tmp.low, &tmp.high, tmp.divisor); + g_assert_cmpuint(tmp.low, ==, tmp.rlow); + g_assert_cmpuint(tmp.high, ==, tmp.rhigh); + g_assert_cmpuint(rem, ==, tmp.remainder); + } +} + +int main(int argc, char **argv) +{ + g_test_init(&argc, &argv, NULL); + g_test_add_func("/host-utils/test_divu128", test_divu128); + g_test_add_func("/host-utils/test_divs128", test_divs128); + return g_test_run(); +} diff --git a/tests/unit/test-smp-parse.c b/tests/unit/test-smp-parse.c new file mode 100644 index 0000000000..cbe0c99049 --- /dev/null +++ b/tests/unit/test-smp-parse.c @@ -0,0 +1,594 @@ +/* + * SMP parsing unit-tests + * + * Copyright (c) 2021 Huawei Technologies Co., Ltd + * + * Authors: + * Yanan Wang + * + * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. + * See the COPYING.LIB file in the top-level directory. 
+ */ + +#include "qemu/osdep.h" +#include "qom/object.h" +#include "qemu/module.h" +#include "qapi/error.h" + +#include "hw/boards.h" + +#define T true +#define F false + +#define MIN_CPUS 1 /* set the min CPUs supported by the machine as 1 */ +#define MAX_CPUS 512 /* set the max CPUs supported by the machine as 512 */ + +/* + * Used to define the generic 3-level CPU topology hierarchy + * -sockets/cores/threads + */ +#define SMP_CONFIG_GENERIC(ha, a, hb, b, hc, c, hd, d, he, e) \ + { \ + .has_cpus = ha, .cpus = a, \ + .has_sockets = hb, .sockets = b, \ + .has_cores = hc, .cores = c, \ + .has_threads = hd, .threads = d, \ + .has_maxcpus = he, .maxcpus = e, \ + } + +#define CPU_TOPOLOGY_GENERIC(a, b, c, d, e) \ + { \ + .cpus = a, \ + .sockets = b, \ + .cores = c, \ + .threads = d, \ + .max_cpus = e, \ + } + +/* + * Currently a 4-level topology hierarchy is supported on PC machines + * -sockets/dies/cores/threads + */ +#define SMP_CONFIG_WITH_DIES(ha, a, hb, b, hc, c, hd, d, he, e, hf, f) \ + { \ + .has_cpus = ha, .cpus = a, \ + .has_sockets = hb, .sockets = b, \ + .has_dies = hc, .dies = c, \ + .has_cores = hd, .cores = d, \ + .has_threads = he, .threads = e, \ + .has_maxcpus = hf, .maxcpus = f, \ + } + +/** + * @config - the given SMP configuration + * @expect_prefer_sockets - the expected parsing result for the + * valid configuration, when sockets are preferred over cores + * @expect_prefer_cores - the expected parsing result for the + * valid configuration, when cores are preferred over sockets + * @expect_error - the expected error report when the given + * configuration is invalid + */ +typedef struct SMPTestData { + SMPConfiguration config; + CpuTopology expect_prefer_sockets; + CpuTopology expect_prefer_cores; + const char *expect_error; +} SMPTestData; + +/* Type info of the tested machine */ +static const TypeInfo smp_machine_info = { + .name = TYPE_MACHINE, + .parent = TYPE_OBJECT, + .class_size = sizeof(MachineClass), + .instance_size = sizeof(MachineState), +}; + +/* + * List all the possible valid sub-collections of the generic 5 + * topology parameters (i.e. cpus/maxcpus/sockets/cores/threads), + * then test the automatic calculation algorithm of the missing + * values in the parser. 
+ */ +static struct SMPTestData data_generic_valid[] = { + { + /* config: no configuration provided + * expect: cpus=1,sockets=1,cores=1,threads=1,maxcpus=1 */ + .config = SMP_CONFIG_GENERIC(F, 0, F, 0, F, 0, F, 0, F, 0), + .expect_prefer_sockets = CPU_TOPOLOGY_GENERIC(1, 1, 1, 1, 1), + .expect_prefer_cores = CPU_TOPOLOGY_GENERIC(1, 1, 1, 1, 1), + }, { + /* config: -smp 8 + * prefer_sockets: cpus=8,sockets=8,cores=1,threads=1,maxcpus=8 + * prefer_cores: cpus=8,sockets=1,cores=8,threads=1,maxcpus=8 */ + .config = SMP_CONFIG_GENERIC(T, 8, F, 0, F, 0, F, 0, F, 0), + .expect_prefer_sockets = CPU_TOPOLOGY_GENERIC(8, 8, 1, 1, 8), + .expect_prefer_cores = CPU_TOPOLOGY_GENERIC(8, 1, 8, 1, 8), + }, { + /* config: -smp sockets=2 + * expect: cpus=2,sockets=2,cores=1,threads=1,maxcpus=2 */ + .config = SMP_CONFIG_GENERIC(F, 0, T, 2, F, 0, F, 0, F, 0), + .expect_prefer_sockets = CPU_TOPOLOGY_GENERIC(2, 2, 1, 1, 2), + .expect_prefer_cores = CPU_TOPOLOGY_GENERIC(2, 2, 1, 1, 2), + }, { + /* config: -smp cores=4 + * expect: cpus=4,sockets=1,cores=4,threads=1,maxcpus=4 */ + .config = SMP_CONFIG_GENERIC(F, 0, F, 0, T, 4, F, 0, F, 0), + .expect_prefer_sockets = CPU_TOPOLOGY_GENERIC(4, 1, 4, 1, 4), + .expect_prefer_cores = CPU_TOPOLOGY_GENERIC(4, 1, 4, 1, 4), + }, { + /* config: -smp threads=2 + * expect: cpus=2,sockets=1,cores=1,threads=2,maxcpus=2 */ + .config = SMP_CONFIG_GENERIC(F, 0, F, 0, F, 0, T, 2, F, 0), + .expect_prefer_sockets = CPU_TOPOLOGY_GENERIC(2, 1, 1, 2, 2), + .expect_prefer_cores = CPU_TOPOLOGY_GENERIC(2, 1, 1, 2, 2), + }, { + /* config: -smp maxcpus=16 + * prefer_sockets: cpus=16,sockets=16,cores=1,threads=1,maxcpus=16 + * prefer_cores: cpus=16,sockets=1,cores=16,threads=1,maxcpus=16 */ + .config = SMP_CONFIG_GENERIC(F, 0, F, 0, F, 0, F, 0, T, 16), + .expect_prefer_sockets = CPU_TOPOLOGY_GENERIC(16, 16, 1, 1, 16), + .expect_prefer_cores = CPU_TOPOLOGY_GENERIC(16, 1, 16, 1, 16), + }, { + /* config: -smp 8,sockets=2 + * expect: cpus=8,sockets=2,cores=4,threads=1,maxcpus=8 */ + .config = SMP_CONFIG_GENERIC(T, 8, T, 2, F, 0, F, 0, F, 0), + .expect_prefer_sockets = CPU_TOPOLOGY_GENERIC(8, 2, 4, 1, 8), + .expect_prefer_cores = CPU_TOPOLOGY_GENERIC(8, 2, 4, 1, 8), + }, { + /* config: -smp 8,cores=4 + * expect: cpus=8,sockets=2,cores=4,threads=1,maxcpus=8 */ + .config = SMP_CONFIG_GENERIC(T, 8, F, 0, T, 4, F, 0, F, 0), + .expect_prefer_sockets = CPU_TOPOLOGY_GENERIC(8, 2, 4, 1, 8), + .expect_prefer_cores = CPU_TOPOLOGY_GENERIC(8, 2, 4, 1, 8), + }, { + /* config: -smp 8,threads=2 + * prefer_sockets: cpus=8,sockets=4,cores=1,threads=2,maxcpus=8 + * prefer_cores: cpus=8,sockets=1,cores=4,threads=2,maxcpus=8 */ + .config = SMP_CONFIG_GENERIC(T, 8, F, 0, F, 0, T, 2, F, 0), + .expect_prefer_sockets = CPU_TOPOLOGY_GENERIC(8, 4, 1, 2, 8), + .expect_prefer_cores = CPU_TOPOLOGY_GENERIC(8, 1, 4, 2, 8), + }, { + /* config: -smp 8,maxcpus=16 + * prefer_sockets: cpus=8,sockets=16,cores=1,threads=1,maxcpus=16 + * prefer_cores: cpus=8,sockets=1,cores=16,threads=1,maxcpus=16 */ + .config = SMP_CONFIG_GENERIC(T, 8, F, 0, F, 0, F, 0, T, 16), + .expect_prefer_sockets = CPU_TOPOLOGY_GENERIC(8, 16, 1, 1, 16), + .expect_prefer_cores = CPU_TOPOLOGY_GENERIC(8, 1, 16, 1, 16), + }, { + /* config: -smp sockets=2,cores=4 + * expect: cpus=8,sockets=2,cores=4,threads=1,maxcpus=8 */ + .config = SMP_CONFIG_GENERIC(F, 0, T, 2, T, 4, F, 0, F, 0), + .expect_prefer_sockets = CPU_TOPOLOGY_GENERIC(8, 2, 4, 1, 8), + .expect_prefer_cores = CPU_TOPOLOGY_GENERIC(8, 2, 4, 1, 8), + }, { + /* config: -smp sockets=2,threads=2 + * expect: 
cpus=4,sockets=2,cores=1,threads=2,maxcpus=4 */ + .config = SMP_CONFIG_GENERIC(F, 0, T, 2, F, 0, T, 2, F, 0), + .expect_prefer_sockets = CPU_TOPOLOGY_GENERIC(4, 2, 1, 2, 4), + .expect_prefer_cores = CPU_TOPOLOGY_GENERIC(4, 2, 1, 2, 4), + }, { + /* config: -smp sockets=2,maxcpus=16 + * expect: cpus=16,sockets=2,cores=8,threads=1,maxcpus=16 */ + .config = SMP_CONFIG_GENERIC(F, 0, T, 2, F, 0, F, 0, T, 16), + .expect_prefer_sockets = CPU_TOPOLOGY_GENERIC(16, 2, 8, 1, 16), + .expect_prefer_cores = CPU_TOPOLOGY_GENERIC(16, 2, 8, 1, 16), + }, { + /* config: -smp cores=4,threads=2 + * expect: cpus=8,sockets=1,cores=4,threads=2,maxcpus=8 */ + .config = SMP_CONFIG_GENERIC(F, 0, F, 0, T, 4, T, 2, F, 0), + .expect_prefer_sockets = CPU_TOPOLOGY_GENERIC(8, 1, 4, 2, 8), + .expect_prefer_cores = CPU_TOPOLOGY_GENERIC(8, 1, 4, 2, 8), + }, { + /* config: -smp cores=4,maxcpus=16 + * expect: cpus=16,sockets=4,cores=4,threads=1,maxcpus=16 */ + .config = SMP_CONFIG_GENERIC(F, 0, F, 0, T, 4, F, 0, T, 16), + .expect_prefer_sockets = CPU_TOPOLOGY_GENERIC(16, 4, 4, 1, 16), + .expect_prefer_cores = CPU_TOPOLOGY_GENERIC(16, 4, 4, 1, 16), + }, { + /* config: -smp threads=2,maxcpus=16 + * prefer_sockets: cpus=16,sockets=8,cores=1,threads=2,maxcpus=16 + * prefer_cores: cpus=16,sockets=1,cores=8,threads=2,maxcpus=16 */ + .config = SMP_CONFIG_GENERIC(F, 0, F, 0, F, 0, T, 2, T, 16), + .expect_prefer_sockets = CPU_TOPOLOGY_GENERIC(16, 8, 1, 2, 16), + .expect_prefer_cores = CPU_TOPOLOGY_GENERIC(16, 1, 8, 2, 16), + }, { + /* config: -smp 8,sockets=2,cores=4 + * expect: cpus=8,sockets=2,cores=4,threads=1,maxcpus=8 */ + .config = SMP_CONFIG_GENERIC(T, 8, T, 2, T, 4, F, 0, F, 0), + .expect_prefer_sockets = CPU_TOPOLOGY_GENERIC(8, 2, 4, 1, 8), + .expect_prefer_cores = CPU_TOPOLOGY_GENERIC(8, 2, 4, 1, 8), + }, { + /* config: -smp 8,sockets=2,threads=2 + * expect: cpus=8,sockets=2,cores=2,threads=2,maxcpus=8 */ + .config = SMP_CONFIG_GENERIC(T, 8, T, 2, F, 0, T, 2, F, 0), + .expect_prefer_sockets = CPU_TOPOLOGY_GENERIC(8, 2, 2, 2, 8), + .expect_prefer_cores = CPU_TOPOLOGY_GENERIC(8, 2, 2, 2, 8), + }, { + /* config: -smp 8,sockets=2,maxcpus=16 + * expect: cpus=8,sockets=2,cores=8,threads=1,maxcpus=16 */ + .config = SMP_CONFIG_GENERIC(T, 8, T, 2, F, 0, F, 0, T, 16), + .expect_prefer_sockets = CPU_TOPOLOGY_GENERIC(8, 2, 8, 1, 16), + .expect_prefer_cores = CPU_TOPOLOGY_GENERIC(8, 2, 8, 1, 16), + }, { + /* config: -smp 8,cores=4,threads=2 + * expect: cpus=8,sockets=1,cores=4,threads=2,maxcpus=8 */ + .config = SMP_CONFIG_GENERIC(T, 8, F, 0, T, 4, T, 2, F, 0), + .expect_prefer_sockets = CPU_TOPOLOGY_GENERIC(8, 1, 4, 2, 8), + .expect_prefer_cores = CPU_TOPOLOGY_GENERIC(8, 1, 4, 2, 8), + }, { + /* config: -smp 8,cores=4,maxcpus=16 + * expect: cpus=8,sockets=4,cores=4,threads=1,maxcpus=16 */ + .config = SMP_CONFIG_GENERIC(T, 8, F, 0, T, 4, F, 0, T, 16), + .expect_prefer_sockets = CPU_TOPOLOGY_GENERIC(8, 4, 4, 1, 16), + .expect_prefer_cores = CPU_TOPOLOGY_GENERIC(8, 4, 4, 1, 16), + }, { + /* config: -smp 8,threads=2,maxcpus=16 + * prefer_sockets: cpus=8,sockets=8,cores=1,threads=2,maxcpus=16 + * prefer_cores: cpus=8,sockets=1,cores=8,threads=2,maxcpus=16 */ + .config = SMP_CONFIG_GENERIC(T, 8, F, 0, F, 0, T, 2, T, 16), + .expect_prefer_sockets = CPU_TOPOLOGY_GENERIC(8, 8, 1, 2, 16), + .expect_prefer_cores = CPU_TOPOLOGY_GENERIC(8, 1, 8, 2, 16), + }, { + /* config: -smp sockets=2,cores=4,threads=2 + * expect: cpus=16,sockets=2,cores=4,threads=2,maxcpus=16 */ + .config = SMP_CONFIG_GENERIC(F, 0, T, 2, T, 4, T, 2, F, 0), + .expect_prefer_sockets 
= CPU_TOPOLOGY_GENERIC(16, 2, 4, 2, 16), + .expect_prefer_cores = CPU_TOPOLOGY_GENERIC(16, 2, 4, 2, 16), + }, { + /* config: -smp sockets=2,cores=4,maxcpus=16 + * expect: cpus=16,sockets=2,cores=4,threads=2,maxcpus=16 */ + .config = SMP_CONFIG_GENERIC(F, 0, T, 2, T, 4, F, 0, T, 16), + .expect_prefer_sockets = CPU_TOPOLOGY_GENERIC(16, 2, 4, 2, 16), + .expect_prefer_cores = CPU_TOPOLOGY_GENERIC(16, 2, 4, 2, 16), + }, { + /* config: -smp sockets=2,threads=2,maxcpus=16 + * expect: cpus=16,sockets=2,cores=4,threads=2,maxcpus=16 */ + .config = SMP_CONFIG_GENERIC(F, 0, T, 2, F, 0, T, 2, T, 16), + .expect_prefer_sockets = CPU_TOPOLOGY_GENERIC(16, 2, 4, 2, 16), + .expect_prefer_cores = CPU_TOPOLOGY_GENERIC(16, 2, 4, 2, 16), + }, { + /* config: -smp cores=4,threads=2,maxcpus=16 + * expect: cpus=16,sockets=2,cores=4,threads=2,maxcpus=16 */ + .config = SMP_CONFIG_GENERIC(F, 0, F, 0, T, 4, T, 2, T, 16), + .expect_prefer_sockets = CPU_TOPOLOGY_GENERIC(16, 2, 4, 2, 16), + .expect_prefer_cores = CPU_TOPOLOGY_GENERIC(16, 2, 4, 2, 16), + }, { + /* config: -smp 8,sockets=2,cores=4,threads=1 + * expect: cpus=8,sockets=2,cores=4,threads=1,maxcpus=8 */ + .config = SMP_CONFIG_GENERIC(T, 8, T, 2, T, 4, T, 1, F, 0), + .expect_prefer_sockets = CPU_TOPOLOGY_GENERIC(8, 2, 4, 1, 8), + .expect_prefer_cores = CPU_TOPOLOGY_GENERIC(8, 2, 4, 1, 8), + }, { + /* config: -smp 8,sockets=2,cores=4,maxcpus=16 + * expect: cpus=8,sockets=2,cores=4,threads=2,maxcpus=16 */ + .config = SMP_CONFIG_GENERIC(T, 8, T, 2, T, 4, F, 0, T, 16), + .expect_prefer_sockets = CPU_TOPOLOGY_GENERIC(8, 2, 4, 2, 16), + .expect_prefer_cores = CPU_TOPOLOGY_GENERIC(8, 2, 4, 2, 16), + }, { + /* config: -smp 8,sockets=2,threads=2,maxcpus=16 + * expect: cpus=8,sockets=2,cores=4,threads=2,maxcpus=16 */ + .config = SMP_CONFIG_GENERIC(T, 8, T, 2, F, 0, T, 2, T, 16), + .expect_prefer_sockets = CPU_TOPOLOGY_GENERIC(8, 2, 4, 2, 16), + .expect_prefer_cores = CPU_TOPOLOGY_GENERIC(8, 2, 4, 2, 16), + }, { + /* config: -smp 8,cores=4,threads=2,maxcpus=16 + * expect: cpus=8,sockets=2,cores=4,threads=2,maxcpus=16 */ + .config = SMP_CONFIG_GENERIC(T, 8, F, 0, T, 4, T, 2, T, 16), + .expect_prefer_sockets = CPU_TOPOLOGY_GENERIC(8, 2, 4, 2, 16), + .expect_prefer_cores = CPU_TOPOLOGY_GENERIC(8, 2, 4, 2, 16), + }, { + /* config: -smp sockets=2,cores=4,threads=2,maxcpus=16 + * expect: cpus=16,sockets=2,cores=4,threads=2,maxcpus=16 */ + .config = SMP_CONFIG_GENERIC(F, 0, T, 2, T, 4, T, 2, T, 16), + .expect_prefer_sockets = CPU_TOPOLOGY_GENERIC(16, 2, 4, 2, 16), + .expect_prefer_cores = CPU_TOPOLOGY_GENERIC(16, 2, 4, 2, 16), + }, { + /* config: -smp 8,sockets=2,cores=4,threads=2,maxcpus=16 + * expect: cpus=8,sockets=2,cores=4,threads=2,maxcpus=16 */ + .config = SMP_CONFIG_GENERIC(T, 8, T, 2, T, 4, T, 2, T, 16), + .expect_prefer_sockets = CPU_TOPOLOGY_GENERIC(8, 2, 4, 2, 16), + .expect_prefer_cores = CPU_TOPOLOGY_GENERIC(8, 2, 4, 2, 16), + }, +}; + +static struct SMPTestData data_generic_invalid[] = { + { + /* config: -smp 2,dies=2 */ + .config = SMP_CONFIG_WITH_DIES(T, 2, F, 0, T, 2, F, 0, F, 0, F, 0), + .expect_error = "dies not supported by this machine's CPU topology", + }, { + /* config: -smp 8,sockets=2,cores=4,threads=2,maxcpus=8 */ + .config = SMP_CONFIG_GENERIC(T, 8, T, 2, T, 4, T, 2, T, 8), + .expect_error = "Invalid CPU topology: " + "product of the hierarchy must match maxcpus: " + "sockets (2) * cores (4) * threads (2) " + "!= maxcpus (8)", + }, { + /* config: -smp 18,sockets=2,cores=4,threads=2,maxcpus=16 */ + .config = SMP_CONFIG_GENERIC(T, 18, T, 2, T, 4, T, 2, T, 
16), + .expect_error = "Invalid CPU topology: " + "maxcpus must be equal to or greater than smp: " + "sockets (2) * cores (4) * threads (2) " + "== maxcpus (16) < smp_cpus (18)", + }, { + /* config: -smp 1 + * should tweak the supported min CPUs to 2 for testing */ + .config = SMP_CONFIG_GENERIC(T, 1, F, 0, F, 0, F, 0, F, 0), + .expect_error = "Invalid SMP CPUs 1. The min CPUs supported " + "by machine '(null)' is 2", + }, { + /* config: -smp 512 + * should tweak the supported max CPUs to 511 for testing */ + .config = SMP_CONFIG_GENERIC(T, 512, F, 0, F, 0, F, 0, F, 0), + .expect_error = "Invalid SMP CPUs 512. The max CPUs supported " + "by machine '(null)' is 511", + }, +}; + +static struct SMPTestData data_with_dies_invalid[] = { + { + /* config: -smp 16,sockets=2,dies=2,cores=4,threads=2,maxcpus=16 */ + .config = SMP_CONFIG_WITH_DIES(T, 16, T, 2, T, 2, T, 4, T, 2, T, 16), + .expect_error = "Invalid CPU topology: " + "product of the hierarchy must match maxcpus: " + "sockets (2) * dies (2) * cores (4) * threads (2) " + "!= maxcpus (16)", + }, { + /* config: -smp 34,sockets=2,dies=2,cores=4,threads=2,maxcpus=32 */ + .config = SMP_CONFIG_WITH_DIES(T, 34, T, 2, T, 2, T, 4, T, 2, T, 32), + .expect_error = "Invalid CPU topology: " + "maxcpus must be equal to or greater than smp: " + "sockets (2) * dies (2) * cores (4) * threads (2) " + "== maxcpus (32) < smp_cpus (34)", + }, +}; + +static char *smp_config_to_string(SMPConfiguration *config) +{ + return g_strdup_printf( + "(SMPConfiguration) {\n" + " .has_cpus = %5s, cpus = %" PRId64 ",\n" + " .has_sockets = %5s, sockets = %" PRId64 ",\n" + " .has_dies = %5s, dies = %" PRId64 ",\n" + " .has_cores = %5s, cores = %" PRId64 ",\n" + " .has_threads = %5s, threads = %" PRId64 ",\n" + " .has_maxcpus = %5s, maxcpus = %" PRId64 ",\n" + "}", + config->has_cpus ? "true" : "false", config->cpus, + config->has_sockets ? "true" : "false", config->sockets, + config->has_dies ? "true" : "false", config->dies, + config->has_cores ? "true" : "false", config->cores, + config->has_threads ? "true" : "false", config->threads, + config->has_maxcpus ? 
"true" : "false", config->maxcpus); +} + +static char *cpu_topology_to_string(CpuTopology *topo) +{ + return g_strdup_printf( + "(CpuTopology) {\n" + " .cpus = %u,\n" + " .sockets = %u,\n" + " .dies = %u,\n" + " .cores = %u,\n" + " .threads = %u,\n" + " .max_cpus = %u,\n" + "}", + topo->cpus, topo->sockets, topo->dies, + topo->cores, topo->threads, topo->max_cpus); +} + +static void check_parse(MachineState *ms, SMPConfiguration *config, + CpuTopology *expect_topo, const char *expect_err, + bool is_valid) +{ + g_autofree char *config_str = smp_config_to_string(config); + g_autofree char *expect_topo_str = cpu_topology_to_string(expect_topo); + g_autofree char *output_topo_str = NULL; + Error *err = NULL; + + /* call the generic parser smp_parse() */ + smp_parse(ms, config, &err); + + output_topo_str = cpu_topology_to_string(&ms->smp); + + /* when the configuration is supposed to be valid */ + if (is_valid) { + if ((err == NULL) && + (ms->smp.cpus == expect_topo->cpus) && + (ms->smp.sockets == expect_topo->sockets) && + (ms->smp.dies == expect_topo->dies) && + (ms->smp.cores == expect_topo->cores) && + (ms->smp.threads == expect_topo->threads) && + (ms->smp.max_cpus == expect_topo->max_cpus)) { + return; + } + + if (err != NULL) { + g_printerr("Test smp_parse failed!\n" + "Input configuration: %s\n" + "Should be valid: yes\n" + "Expected topology: %s\n\n" + "Result is valid: no\n" + "Output error report: %s\n", + config_str, expect_topo_str, error_get_pretty(err)); + goto end; + } + + g_printerr("Test smp_parse failed!\n" + "Input configuration: %s\n" + "Should be valid: yes\n" + "Expected topology: %s\n\n" + "Result is valid: yes\n" + "Output topology: %s\n", + config_str, expect_topo_str, output_topo_str); + goto end; + } + + /* when the configuration is supposed to be invalid */ + if (err != NULL) { + if (expect_err == NULL || + g_str_equal(expect_err, error_get_pretty(err))) { + error_free(err); + return; + } + + g_printerr("Test smp_parse failed!\n" + "Input configuration: %s\n" + "Should be valid: no\n" + "Expected error report: %s\n\n" + "Result is valid: no\n" + "Output error report: %s\n", + config_str, expect_err, error_get_pretty(err)); + goto end; + } + + g_printerr("Test smp_parse failed!\n" + "Input configuration: %s\n" + "Should be valid: no\n" + "Expected error report: %s\n\n" + "Result is valid: yes\n" + "Output topology: %s\n", + config_str, expect_err, output_topo_str); + +end: + if (err != NULL) { + error_free(err); + } + + abort(); +} + +static void smp_parse_test(MachineState *ms, SMPTestData *data, bool is_valid) +{ + MachineClass *mc = MACHINE_GET_CLASS(ms); + + mc->smp_props.prefer_sockets = true; + check_parse(ms, &data->config, &data->expect_prefer_sockets, + data->expect_error, is_valid); + + mc->smp_props.prefer_sockets = false; + check_parse(ms, &data->config, &data->expect_prefer_cores, + data->expect_error, is_valid); +} + +/* The parsed results of the unsupported parameters should be 1 */ +static void unsupported_params_init(MachineClass *mc, SMPTestData *data) +{ + if (!mc->smp_props.dies_supported) { + data->expect_prefer_sockets.dies = 1; + data->expect_prefer_cores.dies = 1; + } +} + +/* Reset the related machine properties before each sub-test */ +static void smp_machine_class_init(MachineClass *mc) +{ + mc->min_cpus = MIN_CPUS; + mc->max_cpus = MAX_CPUS; + + mc->smp_props.prefer_sockets = true; + mc->smp_props.dies_supported = false; +} + +static void test_generic(void) +{ + Object *obj = object_new(TYPE_MACHINE); + MachineState *ms = MACHINE(obj); + 
MachineClass *mc = MACHINE_GET_CLASS(obj); + SMPTestData *data = &(SMPTestData){{ }}; + int i; + + smp_machine_class_init(mc); + + for (i = 0; i < ARRAY_SIZE(data_generic_valid); i++) { + *data = data_generic_valid[i]; + unsupported_params_init(mc, data); + + smp_parse_test(ms, data, true); + + /* Unsupported parameters can be provided with their values as 1 */ + data->config.has_dies = true; + data->config.dies = 1; + smp_parse_test(ms, data, true); + } + + /* Reset the supported min CPUs and max CPUs */ + mc->min_cpus = 2; + mc->max_cpus = 511; + + for (i = 0; i < ARRAY_SIZE(data_generic_invalid); i++) { + *data = data_generic_invalid[i]; + unsupported_params_init(mc, data); + + smp_parse_test(ms, data, false); + } + + object_unref(obj); +} + +static void test_with_dies(void) +{ + Object *obj = object_new(TYPE_MACHINE); + MachineState *ms = MACHINE(obj); + MachineClass *mc = MACHINE_GET_CLASS(obj); + SMPTestData *data = &(SMPTestData){{ }}; + unsigned int num_dies = 2; + int i; + + smp_machine_class_init(mc); + mc->smp_props.dies_supported = true; + + for (i = 0; i < ARRAY_SIZE(data_generic_valid); i++) { + *data = data_generic_valid[i]; + unsupported_params_init(mc, data); + + /* when dies parameter is omitted, it will be set as 1 */ + data->expect_prefer_sockets.dies = 1; + data->expect_prefer_cores.dies = 1; + + smp_parse_test(ms, data, true); + + /* when dies parameter is specified */ + data->config.has_dies = true; + data->config.dies = num_dies; + if (data->config.has_cpus) { + data->config.cpus *= num_dies; + } + if (data->config.has_maxcpus) { + data->config.maxcpus *= num_dies; + } + + data->expect_prefer_sockets.dies = num_dies; + data->expect_prefer_sockets.cpus *= num_dies; + data->expect_prefer_sockets.max_cpus *= num_dies; + data->expect_prefer_cores.dies = num_dies; + data->expect_prefer_cores.cpus *= num_dies; + data->expect_prefer_cores.max_cpus *= num_dies; + + smp_parse_test(ms, data, true); + } + + for (i = 0; i < ARRAY_SIZE(data_with_dies_invalid); i++) { + *data = data_with_dies_invalid[i]; + unsupported_params_init(mc, data); + + smp_parse_test(ms, data, false); + } + + object_unref(obj); +} + +int main(int argc, char *argv[]) +{ + g_test_init(&argc, &argv, NULL); + + module_call_init(MODULE_INIT_QOM); + type_register_static(&smp_machine_info); + + g_test_add_func("/test-smp-parse/generic", test_generic); + g_test_add_func("/test-smp-parse/with_dies", test_with_dies); + + g_test_run(); + + return 0; +} diff --git a/tests/vm/openbsd b/tests/vm/openbsd index c4c78a80f1..337fe7c303 100755 --- a/tests/vm/openbsd +++ b/tests/vm/openbsd @@ -22,8 +22,8 @@ class OpenBSDVM(basevm.BaseVM): name = "openbsd" arch = "x86_64" - link = "https://cdn.openbsd.org/pub/OpenBSD/6.9/amd64/install69.iso" - csum = "140d26548aec680e34bb5f82295414228e7f61e4f5e7951af066014fda2d6e43" + link = "https://cdn.openbsd.org/pub/OpenBSD/7.0/amd64/install70.iso" + csum = "1882f9a23c9800e5dba3dbd2cf0126f552605c915433ef4c5bb672610a4ca3a4" size = "20G" pkgs = [ # tools @@ -95,10 +95,9 @@ class OpenBSDVM(basevm.BaseVM): self.console_wait_send("Terminal type", "xterm\n") self.console_wait_send("System hostname", "openbsd\n") self.console_wait_send("Which network interface", "vio0\n") - self.console_wait_send("IPv4 address", "dhcp\n") + self.console_wait_send("IPv4 address", "autoconf\n") self.console_wait_send("IPv6 address", "none\n") self.console_wait_send("Which network interface", "done\n") - self.console_wait_send("DNS domain name", "localnet\n") self.console_wait("Password for root account") 
self.console_send("%s\n" % self._config["root_pass"]) self.console_wait("Password for root account") diff --git a/tools/virtiofsd/fuse_virtio.c b/tools/virtiofsd/fuse_virtio.c index 8f4fd165b9..60b96470c5 100644 --- a/tools/virtiofsd/fuse_virtio.c +++ b/tools/virtiofsd/fuse_virtio.c @@ -82,12 +82,6 @@ struct fv_VuDev { struct fv_QueueInfo **qi; }; -/* From spec */ -struct virtio_fs_config { - char tag[36]; - uint32_t num_queues; -}; - /* Callback from libvhost-user */ static uint64_t fv_get_features(VuDev *dev) { @@ -249,6 +243,21 @@ static void vu_dispatch_unlock(struct fv_VuDev *vud) assert(ret == 0); } +static void vq_send_element(struct fv_QueueInfo *qi, VuVirtqElement *elem, + ssize_t len) +{ + struct fuse_session *se = qi->virtio_dev->se; + VuDev *dev = &se->virtio_dev->dev; + VuVirtq *q = vu_get_queue(dev, qi->qidx); + + vu_dispatch_rdlock(qi->virtio_dev); + pthread_mutex_lock(&qi->vq_lock); + vu_queue_push(dev, q, elem, len); + vu_queue_notify(dev, q); + pthread_mutex_unlock(&qi->vq_lock); + vu_dispatch_unlock(qi->virtio_dev); +} + /* * Called back by ll whenever it wants to send a reply/message back * The 1st element of the iov starts with the fuse_out_header @@ -259,8 +268,6 @@ int virtio_send_msg(struct fuse_session *se, struct fuse_chan *ch, { FVRequest *req = container_of(ch, FVRequest, ch); struct fv_QueueInfo *qi = ch->qi; - VuDev *dev = &se->virtio_dev->dev; - VuVirtq *q = vu_get_queue(dev, qi->qidx); VuVirtqElement *elem = &req->elem; int ret = 0; @@ -302,13 +309,7 @@ int virtio_send_msg(struct fuse_session *se, struct fuse_chan *ch, copy_iov(iov, count, in_sg, in_num, tosend_len); - vu_dispatch_rdlock(qi->virtio_dev); - pthread_mutex_lock(&qi->vq_lock); - vu_queue_push(dev, q, elem, tosend_len); - vu_queue_notify(dev, q); - pthread_mutex_unlock(&qi->vq_lock); - vu_dispatch_unlock(qi->virtio_dev); - + vq_send_element(qi, elem, tosend_len); req->reply_sent = true; err: @@ -327,8 +328,6 @@ int virtio_send_data_iov(struct fuse_session *se, struct fuse_chan *ch, { FVRequest *req = container_of(ch, FVRequest, ch); struct fv_QueueInfo *qi = ch->qi; - VuDev *dev = &se->virtio_dev->dev; - VuVirtq *q = vu_get_queue(dev, qi->qidx); VuVirtqElement *elem = &req->elem; int ret = 0; g_autofree struct iovec *in_sg_cpy = NULL; @@ -436,12 +435,7 @@ int virtio_send_data_iov(struct fuse_session *se, struct fuse_chan *ch, out_sg->len = tosend_len; } - vu_dispatch_rdlock(qi->virtio_dev); - pthread_mutex_lock(&qi->vq_lock); - vu_queue_push(dev, q, elem, tosend_len); - vu_queue_notify(dev, q); - pthread_mutex_unlock(&qi->vq_lock); - vu_dispatch_unlock(qi->virtio_dev); + vq_send_element(qi, elem, tosend_len); req->reply_sent = true; return 0; } @@ -453,7 +447,6 @@ static void fv_queue_worker(gpointer data, gpointer user_data) { struct fv_QueueInfo *qi = user_data; struct fuse_session *se = qi->virtio_dev->se; - struct VuDev *dev = &qi->virtio_dev->dev; FVRequest *req = data; VuVirtqElement *elem = &req->elem; struct fuse_buf fbuf = {}; @@ -595,17 +588,9 @@ out: /* If the request has no reply, still recycle the virtqueue element */ if (!req->reply_sent) { - struct VuVirtq *q = vu_get_queue(dev, qi->qidx); - fuse_log(FUSE_LOG_DEBUG, "%s: elem %d no reply sent\n", __func__, elem->index); - - vu_dispatch_rdlock(qi->virtio_dev); - pthread_mutex_lock(&qi->vq_lock); - vu_queue_push(dev, q, elem, 0); - vu_queue_notify(dev, q); - pthread_mutex_unlock(&qi->vq_lock); - vu_dispatch_unlock(qi->virtio_dev); + vq_send_element(qi, elem, 0); } pthread_mutex_destroy(&req->ch.lock); @@ -755,6 +740,18 @@ static void 
fv_queue_cleanup_thread(struct fv_VuDev *vud, int qidx) vud->qi[qidx] = NULL; } +static void stop_all_queues(struct fv_VuDev *vud) +{ + for (int i = 0; i < vud->nqueues; i++) { + if (!vud->qi[i]) { + continue; + } + + fuse_log(FUSE_LOG_INFO, "%s: Stopping queue %d thread\n", __func__, i); + fv_queue_cleanup_thread(vud, i); + } +} + /* Callback from libvhost-user on start or stop of a queue */ static void fv_queue_set_started(VuDev *dev, int qidx, bool started) { @@ -885,15 +882,7 @@ int virtio_loop(struct fuse_session *se) * Make sure all fv_queue_thread()s quit on exit, as we're about to * free virtio dev and fuse session, no one should access them anymore. */ - for (int i = 0; i < se->virtio_dev->nqueues; i++) { - if (!se->virtio_dev->qi[i]) { - continue; - } - - fuse_log(FUSE_LOG_INFO, "%s: Stopping queue %d thread\n", __func__, i); - fv_queue_cleanup_thread(se->virtio_dev, i); - } - + stop_all_queues(se->virtio_dev); fuse_log(FUSE_LOG_INFO, "%s: Exit\n", __func__); return 0; @@ -999,6 +988,13 @@ static int fv_create_listen_socket(struct fuse_session *se) "vhost socket failed to set group to %s (%d): %m\n", se->vu_socket_group, g->gr_gid); } + } else { + fuse_log(FUSE_LOG_ERR, + "vhost socket: unable to find group '%s'\n", + se->vu_socket_group); + close(listen_sock); + umask(old_umask); + return -1; } } umask(old_umask); diff --git a/tools/virtiofsd/passthrough_ll.c b/tools/virtiofsd/passthrough_ll.c index 38b2af8599..64b5b4fbb1 100644 --- a/tools/virtiofsd/passthrough_ll.c +++ b/tools/virtiofsd/passthrough_ll.c @@ -2465,6 +2465,11 @@ static void lo_flock(fuse_req_t req, fuse_ino_t ino, struct fuse_file_info *fi, * Automatically reversed on read */ #define XATTR_MAP_FLAG_PREFIX (1 << 2) +/* + * The attribute is unsupported; + * ENOTSUP on write, hidden on read. 
+ */ +#define XATTR_MAP_FLAG_UNSUPPORTED (1 << 3) /* scopes */ /* Apply rule to get/set/remove */ @@ -2636,6 +2641,8 @@ static void parse_xattrmap(struct lo_data *lo) tmp_entry.flags |= XATTR_MAP_FLAG_OK; } else if (strstart(map, "bad", &map)) { tmp_entry.flags |= XATTR_MAP_FLAG_BAD; + } else if (strstart(map, "unsupported", &map)) { + tmp_entry.flags |= XATTR_MAP_FLAG_UNSUPPORTED; } else if (strstart(map, "map", &map)) { /* * map is sugar that adds a number of rules, and must be @@ -2646,8 +2653,8 @@ static void parse_xattrmap(struct lo_data *lo) } else { fuse_log(FUSE_LOG_ERR, "%s: Unexpected type;" - "Expecting 'prefix', 'ok', 'bad' or 'map' in rule %zu\n", - __func__, lo->xattr_map_nentries); + "Expecting 'prefix', 'ok', 'bad', 'unsupported' or 'map'" + " in rule %zu\n", __func__, lo->xattr_map_nentries); exit(1); } @@ -2749,6 +2756,9 @@ static int xattr_map_client(const struct lo_data *lo, const char *client_name, if (cur_entry->flags & XATTR_MAP_FLAG_BAD) { return -EPERM; } + if (cur_entry->flags & XATTR_MAP_FLAG_UNSUPPORTED) { + return -ENOTSUP; + } if (cur_entry->flags & XATTR_MAP_FLAG_OK) { /* Unmodified name */ return 0; @@ -2788,7 +2798,8 @@ static int xattr_map_server(const struct lo_data *lo, const char *server_name, if ((cur_entry->flags & XATTR_MAP_FLAG_SERVER) && (strstart(server_name, cur_entry->prepend, &end))) { - if (cur_entry->flags & XATTR_MAP_FLAG_BAD) { + if (cur_entry->flags & XATTR_MAP_FLAG_BAD || + cur_entry->flags & XATTR_MAP_FLAG_UNSUPPORTED) { return -ENODATA; } if (cur_entry->flags & XATTR_MAP_FLAG_OK) { diff --git a/trace-events b/trace-events index c4cca29939..a637a61eba 100644 --- a/trace-events +++ b/trace-events @@ -120,26 +120,16 @@ vcpu guest_cpu_reset(void) # tcg/tcg-op.c # @vaddr: Access' virtual address. -# @info : Access' information (see below). +# @memopidx: Access' information (see below). # # Start virtual memory access (before any potential access violation). -# # Does not include memory accesses performed by devices. # -# Access information can be parsed as: -# -# struct mem_info { -# uint8_t size_shift : 4; /* interpreted as "1 << size_shift" bytes */ -# bool sign_extend: 1; /* sign-extended */ -# uint8_t endianness : 1; /* 0: little, 1: big */ -# bool store : 1; /* whether it is a store operation */ -# pad : 1; -# uint8_t mmuidx : 4; /* mmuidx (softmmu only) */ -# }; -# # Mode: user, softmmu # Targets: TCG(all) -vcpu tcg guest_mem_before(TCGv vaddr, uint16_t info) "info=%d", "vaddr=0x%016"PRIx64" info=%d" +vcpu tcg guest_ld_before(TCGv vaddr, uint32_t memopidx) "info=%d", "vaddr=0x%016"PRIx64" memopidx=0x%x" +vcpu tcg guest_st_before(TCGv vaddr, uint32_t memopidx) "info=%d", "vaddr=0x%016"PRIx64" memopidx=0x%x" +vcpu tcg guest_rmw_before(TCGv vaddr, uint32_t memopidx) "info=%d", "vaddr=0x%016"PRIx64" memopidx=0x%x" # include/user/syscall-trace.h diff --git a/trace/mem.h b/trace/mem.h deleted file mode 100644 index 2f27e7bdf0..0000000000 --- a/trace/mem.h +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Helper functions for guest memory tracing - * - * Copyright (C) 2016 Lluís Vilanova - * - * This work is licensed under the terms of the GNU GPL, version 2 or later. - * See the COPYING file in the top-level directory. 
- */ - -#ifndef TRACE__MEM_H -#define TRACE__MEM_H - -#include "tcg/tcg.h" - -#define TRACE_MEM_SZ_SHIFT_MASK 0xf /* size shift mask */ -#define TRACE_MEM_SE (1ULL << 4) /* sign extended (y/n) */ -#define TRACE_MEM_BE (1ULL << 5) /* big endian (y/n) */ -#define TRACE_MEM_ST (1ULL << 6) /* store (y/n) */ -#define TRACE_MEM_MMU_SHIFT 8 /* mmu idx */ - -/** - * trace_mem_build_info: - * - * Return a value for the 'info' argument in guest memory access traces. - */ -static inline uint16_t trace_mem_build_info(int size_shift, bool sign_extend, - MemOp endianness, bool store, - unsigned int mmu_idx) -{ - uint16_t res; - - res = size_shift & TRACE_MEM_SZ_SHIFT_MASK; - if (sign_extend) { - res |= TRACE_MEM_SE; - } - if (endianness == MO_BE) { - res |= TRACE_MEM_BE; - } - if (store) { - res |= TRACE_MEM_ST; - } -#ifdef CONFIG_SOFTMMU - res |= mmu_idx << TRACE_MEM_MMU_SHIFT; -#endif - return res; -} - - -/** - * trace_mem_get_info: - * - * Return a value for the 'info' argument in guest memory access traces. - */ -static inline uint16_t trace_mem_get_info(MemOp op, - unsigned int mmu_idx, - bool store) -{ - return trace_mem_build_info(op & MO_SIZE, !!(op & MO_SIGN), - op & MO_BSWAP, store, - mmu_idx); -} - -#endif /* TRACE__MEM_H */ diff --git a/trace/meson.build b/trace/meson.build index e401e7c415..573dd699c6 100644 --- a/trace/meson.build +++ b/trace/meson.build @@ -2,9 +2,8 @@ specific_ss.add(files('control-target.c')) trace_events_files = [] -dtrace = find_program('dtrace', required: 'CONFIG_TRACE_DTRACE' in config_host) foreach dir : [ '.' ] + trace_events_subdirs - trace_events_file = meson.source_root() / dir / 'trace-events' + trace_events_file = meson.project_source_root() / dir / 'trace-events' trace_events_files += [ trace_events_file ] group_name = dir == '.' ? 'root' : dir.underscorify() group = '--group=' + group_name @@ -21,7 +20,7 @@ foreach dir : [ '.' ] + trace_events_subdirs input: trace_events_file, command: [ tracetool, group, '--format=c', '@INPUT@', '@OUTPUT@' ], depend_files: tracetool_depends) - if 'CONFIG_TRACE_UST' in config_host + if 'ust' in get_option('trace_backends') trace_ust_h = custom_target(fmt.format('trace-ust', 'h'), output: fmt.format('trace-ust', 'h'), input: trace_events_file, @@ -31,7 +30,7 @@ foreach dir : [ '.' 
] + trace_events_subdirs genh += trace_ust_h endif trace_ss.add(trace_h, trace_c) - if 'CONFIG_TRACE_DTRACE' in config_host + if 'dtrace' in get_option('trace_backends') trace_dtrace = custom_target(fmt.format('trace-dtrace', 'dtrace'), output: fmt.format('trace-dtrace', 'dtrace'), input: trace_events_file, @@ -70,13 +69,13 @@ foreach d : [ ] gen = custom_target(d[0], output: d[0], - input: meson.source_root() / 'trace-events', + input: meson.project_source_root() / 'trace-events', command: [ tracetool, '--group=root', '--format=@0@'.format(d[1]), '@INPUT@', '@OUTPUT@' ], depend_files: tracetool_depends) specific_ss.add(when: 'CONFIG_TCG', if_true: gen) endforeach -if 'CONFIG_TRACE_UST' in config_host +if 'ust' in get_option('trace_backends') trace_ust_all_h = custom_target('trace-ust-all.h', output: 'trace-ust-all.h', input: trace_events_files, @@ -91,7 +90,11 @@ if 'CONFIG_TRACE_UST' in config_host genh += trace_ust_all_h endif -trace_ss.add(when: 'CONFIG_TRACE_SIMPLE', if_true: files('simple.c')) -trace_ss.add(when: 'CONFIG_TRACE_FTRACE', if_true: files('ftrace.c')) +if 'simple' in get_option('trace_backends') + trace_ss.add(files('simple.c')) +endif +if 'ftrace' in get_option('trace_backends') + trace_ss.add(files('ftrace.c')) +endif trace_ss.add(files('control.c')) trace_ss.add(files('qmp.c')) diff --git a/trace/simple.c b/trace/simple.c index ac499edee0..18af590cf7 100644 --- a/trace/simple.c +++ b/trace/simple.c @@ -364,7 +364,7 @@ void st_set_trace_file(const char *file) if (!file) { /* Type cast needed for Windows where getpid() returns an int. */ - trace_file_name = g_strdup_printf(CONFIG_TRACE_FILE, (pid_t)getpid()); + trace_file_name = g_strdup_printf(CONFIG_TRACE_FILE "-" FMT_pid, (pid_t)getpid()); } else { trace_file_name = g_strdup_printf("%s", file); } diff --git a/ui/console.c b/ui/console.c index eabbbc951c..29a3e3f0f5 100644 --- a/ui/console.c +++ b/ui/console.c @@ -27,10 +27,12 @@ #include "hw/qdev-core.h" #include "qapi/error.h" #include "qapi/qapi-commands-ui.h" +#include "qemu/fifo8.h" +#include "qemu/main-loop.h" #include "qemu/module.h" #include "qemu/option.h" #include "qemu/timer.h" -#include "chardev/char-fe.h" +#include "chardev/char.h" #include "trace.h" #include "exec/memory.h" #include "io/channel-file.h" @@ -62,57 +64,6 @@ enum TTYState { TTY_STATE_CSI, }; -typedef struct QEMUFIFO { - uint8_t *buf; - int buf_size; - int count, wptr, rptr; -} QEMUFIFO; - -static int qemu_fifo_write(QEMUFIFO *f, const uint8_t *buf, int len1) -{ - int l, len; - - l = f->buf_size - f->count; - if (len1 > l) - len1 = l; - len = len1; - while (len > 0) { - l = f->buf_size - f->wptr; - if (l > len) - l = len; - memcpy(f->buf + f->wptr, buf, l); - f->wptr += l; - if (f->wptr >= f->buf_size) - f->wptr = 0; - buf += l; - len -= l; - } - f->count += len1; - return len1; -} - -static int qemu_fifo_read(QEMUFIFO *f, uint8_t *buf, int len1) -{ - int l, len; - - if (len1 > f->count) - len1 = f->count; - len = len1; - while (len > 0) { - l = f->buf_size - f->rptr; - if (l > len) - l = len; - memcpy(buf, f->buf + f->rptr, l); - f->rptr += l; - if (f->rptr >= f->buf_size) - f->rptr = 0; - buf += l; - len -= l; - } - f->count -= len1; - return len1; -} - typedef enum { GRAPHIC_CONSOLE, TEXT_CONSOLE, @@ -165,9 +116,7 @@ struct QemuConsole { Chardev *chr; /* fifo for key pressed */ - QEMUFIFO out_fifo; - uint8_t out_fifo_buf[16]; - QEMUTimer *kbd_timer; + Fifo8 out_fifo; CoQueue dump_queue; QTAILQ_ENTRY(QemuConsole) next; @@ -1157,25 +1106,20 @@ static int vc_chr_write(Chardev *chr, const 
uint8_t *buf, int len) return len; } -static void kbd_send_chars(void *opaque) +static void kbd_send_chars(QemuConsole *s) { - QemuConsole *s = opaque; - int len; - uint8_t buf[16]; + uint32_t len, avail; len = qemu_chr_be_can_write(s->chr); - if (len > s->out_fifo.count) - len = s->out_fifo.count; - if (len > 0) { - if (len > sizeof(buf)) - len = sizeof(buf); - qemu_fifo_read(&s->out_fifo, buf, len); - qemu_chr_be_write(s->chr, buf, len); - } - /* characters are pending: we send them a bit later (XXX: - horrible, should change char device API) */ - if (s->out_fifo.count > 0) { - timer_mod(s->kbd_timer, qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + 1); + avail = fifo8_num_used(&s->out_fifo); + while (len > 0 && avail > 0) { + const uint8_t *buf; + uint32_t size; + + buf = fifo8_pop_buf(&s->out_fifo, MIN(len, avail), &size); + qemu_chr_be_write(s->chr, (uint8_t *)buf, size); + len = qemu_chr_be_can_write(s->chr); + avail -= size; } } @@ -1183,8 +1127,8 @@ static void kbd_send_chars(void *opaque) void kbd_put_keysym_console(QemuConsole *s, int keysym) { uint8_t buf[16], *q; - CharBackend *be; int c; + uint32_t num_free; if (!s || (s->console_type == GRAPHIC_CONSOLE)) return; @@ -1226,11 +1170,9 @@ void kbd_put_keysym_console(QemuConsole *s, int keysym) if (s->echo) { vc_chr_write(s->chr, buf, q - buf); } - be = s->chr->be; - if (be && be->chr_read) { - qemu_fifo_write(&s->out_fifo, buf, q - buf); - kbd_send_chars(s); - } + num_free = fifo8_num_free(&s->out_fifo); + fifo8_push_all(&s->out_fifo, buf, MIN(num_free, q - buf)); + kbd_send_chars(s); break; } } @@ -2186,6 +2128,14 @@ int qemu_console_get_height(QemuConsole *con, int fallback) return con ? surface_height(con->surface) : fallback; } +static void vc_chr_accept_input(Chardev *chr) +{ + VCChardev *drv = VC_CHARDEV(chr); + QemuConsole *s = drv->console; + + kbd_send_chars(s); +} + static void vc_chr_set_echo(Chardev *chr, bool echo) { VCChardev *drv = VC_CHARDEV(chr); @@ -2233,9 +2183,7 @@ static void text_console_do_init(Chardev *chr, DisplayState *ds) int g_width = 80 * FONT_WIDTH; int g_height = 24 * FONT_HEIGHT; - s->out_fifo.buf = s->out_fifo_buf; - s->out_fifo.buf_size = sizeof(s->out_fifo_buf); - s->kbd_timer = timer_new_ms(QEMU_CLOCK_REALTIME, kbd_send_chars, s); + fifo8_create(&s->out_fifo, 16); s->ds = ds; s->y_displayed = 0; @@ -2485,6 +2433,7 @@ static void char_vc_class_init(ObjectClass *oc, void *data) cc->parse = qemu_chr_parse_vc; cc->open = vc_chr_open; cc->chr_write = vc_chr_write; + cc->chr_accept_input = vc_chr_accept_input; cc->chr_set_echo = vc_chr_set_echo; } diff --git a/ui/egl-helpers.c b/ui/egl-helpers.c index 385a3fa752..3a88245b67 100644 --- a/ui/egl-helpers.c +++ b/ui/egl-helpers.c @@ -90,14 +90,31 @@ void egl_fb_setup_new_tex(egl_fb *fb, int width, int height) void egl_fb_blit(egl_fb *dst, egl_fb *src, bool flip) { - GLuint y1, y2; + GLuint x1 = 0; + GLuint y1 = 0; + GLuint x2, y2; + GLuint w = src->width; + GLuint h = src->height; glBindFramebuffer(GL_READ_FRAMEBUFFER, src->framebuffer); glBindFramebuffer(GL_DRAW_FRAMEBUFFER, dst->framebuffer); glViewport(0, 0, dst->width, dst->height); - y1 = flip ? src->height : 0; - y2 = flip ? 0 : src->height; - glBlitFramebuffer(0, y1, src->width, y2, + + if (src->dmabuf) { + x1 = src->dmabuf->x; + y1 = src->dmabuf->y; + w = src->dmabuf->scanout_width; + h = src->dmabuf->scanout_height; + } + + w = (x1 + w) > src->width ? src->width - x1 : w; + h = (y1 + h) > src->height ? src->height - y1 : h; + + y2 = flip ? y1 : h + y1; + y1 = flip ? 
h + y1 : y1; + x2 = x1 + w; + + glBlitFramebuffer(x1, y1, x2, y2, 0, 0, dst->width, dst->height, GL_COLOR_BUFFER_BIT, GL_LINEAR); } diff --git a/ui/gtk-egl.c b/ui/gtk-egl.c index 72ce5e1f8f..45cb67712d 100644 --- a/ui/gtk-egl.c +++ b/ui/gtk-egl.c @@ -63,6 +63,9 @@ void gd_egl_init(VirtualConsole *vc) void gd_egl_draw(VirtualConsole *vc) { GdkWindow *window; +#ifdef CONFIG_GBM + QemuDmaBuf *dmabuf = vc->gfx.guest_fb.dmabuf; +#endif int ww, wh; if (!vc->gfx.gls) { @@ -74,10 +77,31 @@ void gd_egl_draw(VirtualConsole *vc) wh = gdk_window_get_height(window); if (vc->gfx.scanout_mode) { +#ifdef CONFIG_GBM + if (dmabuf) { + if (!dmabuf->draw_submitted) { + return; + } else { + dmabuf->draw_submitted = false; + } + } +#endif gd_egl_scanout_flush(&vc->gfx.dcl, 0, 0, vc->gfx.w, vc->gfx.h); vc->gfx.scale_x = (double)ww / vc->gfx.w; vc->gfx.scale_y = (double)wh / vc->gfx.h; + + glFlush(); +#ifdef CONFIG_GBM + if (dmabuf) { + egl_dmabuf_create_fence(dmabuf); + if (dmabuf->fence_fd > 0) { + qemu_set_fd_handler(dmabuf->fence_fd, gd_hw_gl_flushed, NULL, vc); + return; + } + graphic_hw_gl_block(vc->gfx.dcl.con, false); + } +#endif } else { if (!vc->gfx.ds) { return; @@ -92,21 +116,10 @@ void gd_egl_draw(VirtualConsole *vc) vc->gfx.scale_x = (double)ww / surface_width(vc->gfx.ds); vc->gfx.scale_y = (double)wh / surface_height(vc->gfx.ds); + + glFlush(); } - glFlush(); -#ifdef CONFIG_GBM - if (vc->gfx.guest_fb.dmabuf) { - QemuDmaBuf *dmabuf = vc->gfx.guest_fb.dmabuf; - - egl_dmabuf_create_fence(dmabuf); - if (dmabuf->fence_fd > 0) { - qemu_set_fd_handler(dmabuf->fence_fd, gd_hw_gl_flushed, NULL, vc); - return; - } - graphic_hw_gl_block(vc->gfx.dcl.con, false); - } -#endif graphic_hw_gl_flushed(vc->gfx.dcl.con); } @@ -139,8 +152,15 @@ void gd_egl_refresh(DisplayChangeListener *dcl) } vc->gfx.gls = qemu_gl_init_shader(); if (vc->gfx.ds) { + surface_gl_destroy_texture(vc->gfx.gls, vc->gfx.ds); surface_gl_create_texture(vc->gfx.gls, vc->gfx.ds); } +#ifdef CONFIG_GBM + if (vc->gfx.guest_fb.dmabuf) { + egl_dmabuf_release_texture(vc->gfx.guest_fb.dmabuf); + gd_egl_scanout_dmabuf(dcl, vc->gfx.guest_fb.dmabuf); + } +#endif } graphic_hw_update(dcl->con); @@ -165,6 +185,8 @@ void gd_egl_switch(DisplayChangeListener *dcl, surface_height(vc->gfx.ds) == surface_height(surface)) { resized = false; } + eglMakeCurrent(qemu_egl_display, vc->gfx.esurface, + vc->gfx.esurface, vc->gfx.ectx); surface_gl_destroy_texture(vc->gfx.gls, vc->gfx.ds); vc->gfx.ds = surface; @@ -224,6 +246,9 @@ void gd_egl_scanout_dmabuf(DisplayChangeListener *dcl, #ifdef CONFIG_GBM VirtualConsole *vc = container_of(dcl, VirtualConsole, gfx.dcl); + eglMakeCurrent(qemu_egl_display, vc->gfx.esurface, + vc->gfx.esurface, vc->gfx.ectx); + egl_dmabuf_import_texture(dmabuf); if (!dmabuf->texture) { return; @@ -317,6 +342,7 @@ void gd_egl_flush(DisplayChangeListener *dcl, if (vc->gfx.guest_fb.dmabuf) { graphic_hw_gl_block(vc->gfx.dcl.con, true); + vc->gfx.guest_fb.dmabuf->draw_submitted = true; gtk_widget_queue_draw_area(area, x, y, w, h); return; } diff --git a/ui/gtk-gl-area.c b/ui/gtk-gl-area.c index b23523748e..461da7712f 100644 --- a/ui/gtk-gl-area.c +++ b/ui/gtk-gl-area.c @@ -38,6 +38,9 @@ static void gtk_gl_area_set_scanout_mode(VirtualConsole *vc, bool scanout) void gd_gl_area_draw(VirtualConsole *vc) { +#ifdef CONFIG_GBM + QemuDmaBuf *dmabuf = vc->gfx.guest_fb.dmabuf; +#endif int ww, wh, y1, y2; if (!vc->gfx.gls) { @@ -53,6 +56,16 @@ void gd_gl_area_draw(VirtualConsole *vc) return; } +#ifdef CONFIG_GBM + if (dmabuf) { + if (!dmabuf->draw_submitted) { + 
return; + } else { + dmabuf->draw_submitted = false; + } + } +#endif + glBindFramebuffer(GL_READ_FRAMEBUFFER, vc->gfx.guest_fb.framebuffer); /* GtkGLArea sets GL_DRAW_FRAMEBUFFER for us */ @@ -62,6 +75,22 @@ void gd_gl_area_draw(VirtualConsole *vc) glBlitFramebuffer(0, y1, vc->gfx.w, y2, 0, 0, ww, wh, GL_COLOR_BUFFER_BIT, GL_NEAREST); +#ifdef CONFIG_GBM + if (dmabuf) { + egl_dmabuf_create_sync(dmabuf); + } +#endif + glFlush(); +#ifdef CONFIG_GBM + if (dmabuf) { + egl_dmabuf_create_fence(dmabuf); + if (dmabuf->fence_fd > 0) { + qemu_set_fd_handler(dmabuf->fence_fd, gd_hw_gl_flushed, NULL, vc); + return; + } + graphic_hw_gl_block(vc->gfx.dcl.con, false); + } +#endif } else { if (!vc->gfx.ds) { return; @@ -72,25 +101,6 @@ void gd_gl_area_draw(VirtualConsole *vc) surface_gl_render_texture(vc->gfx.gls, vc->gfx.ds); } -#ifdef CONFIG_GBM - if (vc->gfx.guest_fb.dmabuf) { - egl_dmabuf_create_sync(vc->gfx.guest_fb.dmabuf); - } -#endif - - glFlush(); -#ifdef CONFIG_GBM - if (vc->gfx.guest_fb.dmabuf) { - QemuDmaBuf *dmabuf = vc->gfx.guest_fb.dmabuf; - - egl_dmabuf_create_fence(dmabuf); - if (dmabuf->fence_fd > 0) { - qemu_set_fd_handler(dmabuf->fence_fd, gd_hw_gl_flushed, NULL, vc); - return; - } - graphic_hw_gl_block(vc->gfx.dcl.con, false); - } -#endif graphic_hw_gl_flushed(vc->gfx.dcl.con); } @@ -112,6 +122,9 @@ void gd_gl_area_refresh(DisplayChangeListener *dcl) { VirtualConsole *vc = container_of(dcl, VirtualConsole, gfx.dcl); + vc->gfx.dcl.update_interval = gd_monitor_update_interval( + vc->window ? vc->window : vc->gfx.drawing_area); + if (!vc->gfx.gls) { if (!gtk_widget_get_realized(vc->gfx.drawing_area)) { return; @@ -234,6 +247,7 @@ void gd_gl_area_scanout_flush(DisplayChangeListener *dcl, if (vc->gfx.guest_fb.dmabuf) { graphic_hw_gl_block(vc->gfx.dcl.con, true); + vc->gfx.guest_fb.dmabuf->draw_submitted = true; } gtk_gl_area_queue_render(GTK_GL_AREA(vc->gfx.drawing_area)); } diff --git a/ui/gtk.c b/ui/gtk.c index b0564d80c1..d2892ea6b4 100644 --- a/ui/gtk.c +++ b/ui/gtk.c @@ -778,6 +778,9 @@ static gboolean gd_draw_event(GtkWidget *widget, cairo_t *cr, void *opaque) if (!vc->gfx.ds) { return FALSE; } + if (!vc->gfx.surface) { + return FALSE; + } vc->gfx.dcl.update_interval = gd_monitor_update_interval(vc->window ? 
vc->window : s->window); @@ -1242,6 +1245,16 @@ static gboolean gd_tab_window_close(GtkWidget *widget, GdkEvent *event, vc->tab_item, vc->label); gtk_widget_destroy(vc->window); vc->window = NULL; +#if defined(CONFIG_OPENGL) + if (vc->gfx.esurface) { + eglDestroySurface(qemu_egl_display, vc->gfx.esurface); + vc->gfx.esurface = NULL; + } + if (vc->gfx.ectx) { + eglDestroyContext(qemu_egl_display, vc->gfx.ectx); + vc->gfx.ectx = NULL; + } +#endif return TRUE; } @@ -1271,6 +1284,16 @@ static void gd_menu_untabify(GtkMenuItem *item, void *opaque) if (!vc->window) { gtk_widget_set_sensitive(vc->menu_item, false); vc->window = gtk_window_new(GTK_WINDOW_TOPLEVEL); +#if defined(CONFIG_OPENGL) + if (vc->gfx.esurface) { + eglDestroySurface(qemu_egl_display, vc->gfx.esurface); + vc->gfx.esurface = NULL; + } + if (vc->gfx.ectx) { + eglDestroyContext(qemu_egl_display, vc->gfx.ectx); + vc->gfx.ectx = NULL; + } +#endif gd_widget_reparent(s->notebook, vc->window, vc->tab_item); g_signal_connect(vc->window, "delete-event", diff --git a/ui/meson.build b/ui/meson.build index a73beb0e54..ee8ef27714 100644 --- a/ui/meson.build +++ b/ui/meson.build @@ -89,7 +89,7 @@ if sdl.found() ui_modules += {'sdl' : sdl_ss} endif -if config_host.has_key('CONFIG_SPICE') +if spice.found() spice_core_ss = ss.source_set() spice_core_ss.add(spice, pixman, files( 'spice-core.c', @@ -99,7 +99,7 @@ if config_host.has_key('CONFIG_SPICE') ui_modules += {'spice-core' : spice_core_ss} endif -if config_host.has_key('CONFIG_SPICE') and config_host.has_key('CONFIG_GIO') +if spice.found() and config_host.has_key('CONFIG_GIO') spice_ss = ss.source_set() spice_ss.add(spice, gio, pixman, files('spice-app.c')) ui_modules += {'spice-app': spice_ss} diff --git a/util/compatfd.c b/util/compatfd.c index a8ec525c6c..ab810c42a9 100644 --- a/util/compatfd.c +++ b/util/compatfd.c @@ -17,7 +17,7 @@ #include "qemu/thread.h" #if defined(CONFIG_SIGNALFD) -#include <sys/syscall.h> +#include <sys/signalfd.h> #endif struct sigfd_compat_info { @@ -96,9 +96,8 @@ int qemu_signalfd(const sigset_t *mask) #if defined(CONFIG_SIGNALFD) int ret; - ret = syscall(SYS_signalfd, -1, mask, _NSIG / 8); + ret = signalfd(-1, mask, SFD_CLOEXEC); if (ret != -1) { - qemu_set_cloexec(ret); return ret; } #endif diff --git a/util/host-utils.c b/util/host-utils.c index 7b9322071d..bcc772b8ec 100644 --- a/util/host-utils.c +++ b/util/host-utils.c @@ -86,78 +86,119 @@ void muls64 (uint64_t *plow, uint64_t *phigh, int64_t a, int64_t b) *phigh = rh; } -/* Unsigned 128x64 division. Returns 1 if overflow (divide by zero or */ -/* quotient exceeds 64 bits). Otherwise returns quotient via plow and */ -/* remainder via phigh. */ -int divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor) +/* + * Unsigned 128-by-64 division. + * Returns the remainder. + * Returns quotient via plow and phigh. + * Also returns the remainder via the function return value. 
+ */ +uint64_t divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor) { uint64_t dhi = *phigh; uint64_t dlo = *plow; - unsigned i; - uint64_t carry = 0; + uint64_t rem, dhighest; + int sh; - if (divisor == 0) { - return 1; - } else if (dhi == 0) { + if (divisor == 0 || dhi == 0) { *plow = dlo / divisor; - *phigh = dlo % divisor; - return 0; - } else if (dhi > divisor) { - return 1; + *phigh = 0; + return dlo % divisor; } else { + sh = clz64(divisor); - for (i = 0; i < 64; i++) { - carry = dhi >> 63; - dhi = (dhi << 1) | (dlo >> 63); - if (carry || (dhi >= divisor)) { - dhi -= divisor; - carry = 1; - } else { - carry = 0; + if (dhi < divisor) { + if (sh != 0) { + /* normalize the divisor, shifting the dividend accordingly */ + divisor <<= sh; + dhi = (dhi << sh) | (dlo >> (64 - sh)); + dlo <<= sh; } - dlo = (dlo << 1) | carry; + + *phigh = 0; + *plow = udiv_qrnnd(&rem, dhi, dlo, divisor); + } else { + if (sh != 0) { + /* normalize the divisor, shifting the dividend accordingly */ + divisor <<= sh; + dhighest = dhi >> (64 - sh); + dhi = (dhi << sh) | (dlo >> (64 - sh)); + dlo <<= sh; + + *phigh = udiv_qrnnd(&dhi, dhighest, dhi, divisor); + } else { + /** + * dhi >= divisor + * Since the MSB of divisor is set (sh == 0), + * (dhi - divisor) < divisor + * + * Thus, the high part of the quotient is 1, and we can + * calculate the low part with a single call to udiv_qrnnd + * after subtracting divisor from dhi + */ + dhi -= divisor; + *phigh = 1; + } + + *plow = udiv_qrnnd(&rem, dhi, dlo, divisor); } - *plow = dlo; - *phigh = dhi; - return 0; + /* + * since the dividend/divisor might have been normalized, + * the remainder might also have to be shifted back + */ + return rem >> sh; } } -int divs128(int64_t *plow, int64_t *phigh, int64_t divisor) +/* + * Signed 128-by-64 division. + * Returns quotient via plow and phigh. + * Also returns the remainder via the function return value. 
+ */ +int64_t divs128(uint64_t *plow, int64_t *phigh, int64_t divisor) { - int sgn_dvdnd = *phigh < 0; - int sgn_divsr = divisor < 0; - int overflow = 0; + bool neg_quotient = false, neg_remainder = false; + uint64_t unsig_hi = *phigh, unsig_lo = *plow; + uint64_t rem; - if (sgn_dvdnd) { - *plow = ~(*plow); - *phigh = ~(*phigh); - if (*plow == (int64_t)-1) { - *plow = 0; - (*phigh)++; - } else { - (*plow)++; - } - } + if (*phigh < 0) { + neg_quotient = !neg_quotient; + neg_remainder = !neg_remainder; - if (sgn_divsr) { - divisor = 0 - divisor; - } - - overflow = divu128((uint64_t *)plow, (uint64_t *)phigh, (uint64_t)divisor); - - if (sgn_dvdnd ^ sgn_divsr) { - *plow = 0 - *plow; - } - - if (!overflow) { - if ((*plow < 0) ^ (sgn_dvdnd ^ sgn_divsr)) { - overflow = 1; + if (unsig_lo == 0) { + unsig_hi = -unsig_hi; + } else { + unsig_hi = ~unsig_hi; + unsig_lo = -unsig_lo; } } - return overflow; + if (divisor < 0) { + neg_quotient = !neg_quotient; + + divisor = -divisor; + } + + rem = divu128(&unsig_lo, &unsig_hi, (uint64_t)divisor); + + if (neg_quotient) { + if (unsig_lo == 0) { + *phigh = -unsig_hi; + *plow = 0; + } else { + *phigh = ~unsig_hi; + *plow = -unsig_lo; + } + } else { + *phigh = unsig_hi; + *plow = unsig_lo; + } + + if (neg_remainder) { + return -rem; + } else { + return rem; + } } #endif diff --git a/util/iova-tree.c b/util/iova-tree.c index 7990692cbd..23ea35b7a4 100644 --- a/util/iova-tree.c +++ b/util/iova-tree.c @@ -42,14 +42,14 @@ IOVATree *iova_tree_new(void) return iova_tree; } -DMAMap *iova_tree_find(IOVATree *tree, DMAMap *map) +const DMAMap *iova_tree_find(const IOVATree *tree, const DMAMap *map) { return g_tree_lookup(tree->tree, map); } -DMAMap *iova_tree_find_address(IOVATree *tree, hwaddr iova) +const DMAMap *iova_tree_find_address(const IOVATree *tree, hwaddr iova) { - DMAMap map = { .iova = iova, .size = 0 }; + const DMAMap map = { .iova = iova, .size = 0 }; return iova_tree_find(tree, &map); } @@ -60,7 +60,7 @@ static inline void iova_tree_insert_internal(GTree *gtree, DMAMap *range) g_tree_insert(gtree, range, range); } -int iova_tree_insert(IOVATree *tree, DMAMap *map) +int iova_tree_insert(IOVATree *tree, const DMAMap *map) { DMAMap *new; @@ -96,9 +96,9 @@ void iova_tree_foreach(IOVATree *tree, iova_tree_iterator iterator) g_tree_foreach(tree->tree, iova_tree_traverse, iterator); } -int iova_tree_remove(IOVATree *tree, DMAMap *map) +int iova_tree_remove(IOVATree *tree, const DMAMap *map) { - DMAMap *overlap; + const DMAMap *overlap; while ((overlap = iova_tree_find(tree, map))) { g_tree_remove(tree->tree, overlap); diff --git a/util/meson.build b/util/meson.build index 779f413c86..05b593055a 100644 --- a/util/meson.build +++ b/util/meson.build @@ -1,5 +1,7 @@ util_ss.add(files('osdep.c', 'cutils.c', 'unicode.c', 'qemu-timer-common.c')) -util_ss.add(when: 'CONFIG_ATOMIC64', if_false: files('atomic64.c')) +if not config_host_data.get('CONFIG_ATOMIC64') + util_ss.add(files('atomic64.c')) +endif util_ss.add(when: 'CONFIG_POSIX', if_true: files('aio-posix.c')) util_ss.add(when: 'CONFIG_POSIX', if_true: files('fdmon-poll.c')) if config_host_data.get('CONFIG_EPOLL_CREATE1') diff --git a/util/qemu-option.c b/util/qemu-option.c index 61cb4a97bd..eedd08929b 100644 --- a/util/qemu-option.c +++ b/util/qemu-option.c @@ -1126,11 +1126,11 @@ int qemu_opts_foreach(QemuOptsList *list, qemu_opts_loopfunc func, void *opaque, Error **errp) { Location loc; - QemuOpts *opts; + QemuOpts *opts, *next; int rc = 0; loc_push_none(&loc); - QTAILQ_FOREACH(opts, &list->head, next) { + 
QTAILQ_FOREACH_SAFE(opts, &list->head, next, next) { loc_restore(&opts->loc); rc = func(opaque, opts, errp); if (rc) { diff --git a/util/qemu-sockets.c b/util/qemu-sockets.c index 72216ef980..0585e7a629 100644 --- a/util/qemu-sockets.c +++ b/util/qemu-sockets.c @@ -278,7 +278,7 @@ static int inet_listen_saddr(InetSocketAddress *saddr, /* create socket + bind/listen */ for (e = res; e != NULL; e = e->ai_next) { -#ifdef IPPROTO_MPTCP +#ifdef HAVE_IPPROTO_MPTCP if (saddr->has_mptcp && saddr->mptcp) { e->ai_protocol = IPPROTO_MPTCP; } @@ -462,7 +462,7 @@ int inet_connect_saddr(InetSocketAddress *saddr, Error **errp) error_free(local_err); local_err = NULL; -#ifdef IPPROTO_MPTCP +#ifdef HAVE_IPPROTO_MPTCP if (saddr->has_mptcp && saddr->mptcp) { e->ai_protocol = IPPROTO_MPTCP; } @@ -699,7 +699,7 @@ int inet_parse(InetSocketAddress *addr, const char *str, Error **errp) } addr->has_keep_alive = true; } -#ifdef IPPROTO_MPTCP +#ifdef HAVE_IPPROTO_MPTCP begin = strstr(optstr, ",mptcp"); if (begin) { if (inet_parse_flag("mptcp", begin + strlen(",mptcp"), diff --git a/util/qemu-thread-posix.c b/util/qemu-thread-posix.c index 6c5004220d..e1225b63bd 100644 --- a/util/qemu-thread-posix.c +++ b/util/qemu-thread-posix.c @@ -23,7 +23,8 @@ void qemu_thread_naming(bool enable) { name_threads = enable; -#ifndef CONFIG_THREAD_SETNAME_BYTHREAD +#if !defined CONFIG_PTHREAD_SETNAME_NP_W_TID && \ + !defined CONFIG_PTHREAD_SETNAME_NP_WO_TID /* This is a debugging option, not fatal */ if (enable) { fprintf(stderr, "qemu: thread naming not supported on this host\n"); @@ -522,7 +523,6 @@ static void *qemu_thread_start(void *args) void *arg = qemu_thread_args->arg; void *r; -#ifdef CONFIG_THREAD_SETNAME_BYTHREAD /* Attempt to set the threads name; note that this is for debug, so * we're not going to fail if we can't set it. */ @@ -533,7 +533,6 @@ static void *qemu_thread_start(void *args) pthread_setname_np(qemu_thread_args->name); # endif } -#endif QEMU_TSAN_ANNOTATE_THREAD_NAME(qemu_thread_args->name); g_free(qemu_thread_args->name); g_free(qemu_thread_args); diff --git a/util/rcu.c b/util/rcu.c index 13ac0f75cb..c91da9f137 100644 --- a/util/rcu.c +++ b/util/rcu.c @@ -46,6 +46,7 @@ unsigned long rcu_gp_ctr = RCU_GP_LOCKED; QemuEvent rcu_gp_event; +static int in_drain_call_rcu; static QemuMutex rcu_registry_lock; static QemuMutex rcu_sync_lock; @@ -107,6 +108,8 @@ static void wait_for_readers(void) * get some extra futex wakeups. */ qatomic_set(&index->waiting, false); + } else if (qatomic_read(&in_drain_call_rcu)) { + notifier_list_notify(&index->force_rcu, NULL); } } @@ -339,8 +342,10 @@ void drain_call_rcu(void) * assumed. */ + qatomic_inc(&in_drain_call_rcu); call_rcu1(&rcu_drain.rcu, drain_rcu_callback); qemu_event_wait(&rcu_drain.drain_complete_event); + qatomic_dec(&in_drain_call_rcu); if (locked) { qemu_mutex_lock_iothread(); @@ -363,6 +368,20 @@ void rcu_unregister_thread(void) qemu_mutex_unlock(&rcu_registry_lock); } +void rcu_add_force_rcu_notifier(Notifier *n) +{ + qemu_mutex_lock(&rcu_registry_lock); + notifier_list_add(&rcu_reader.force_rcu, n); + qemu_mutex_unlock(&rcu_registry_lock); +} + +void rcu_remove_force_rcu_notifier(Notifier *n) +{ + qemu_mutex_lock(&rcu_registry_lock); + notifier_remove(n); + qemu_mutex_unlock(&rcu_registry_lock); +} + static void rcu_init_complete(void) { QemuThread thread;